Merge tag 'pci-v5.6-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Feb 2020 14:17:38 +0000 (14:17 +0000)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Feb 2020 14:17:38 +0000 (14:17 +0000)
Pull PCI fixes from Bjorn Helgaas:

 - Define to_pci_sysdata() always to fix build breakage when !CONFIG_PCI
   (Jason A. Donenfeld)

 - Use PF PASID for VFs to fix VF IOMMU bind failures (Kuppuswamy
   Sathyanarayanan)

* tag 'pci-v5.6-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci:
  PCI/ATS: Use PF PASID for VFs
  x86/PCI: Define to_pci_sysdata() even when !CONFIG_PCI
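
Rough sketches of the two fixes, for context (hedged; to_pci_sysdata() is the
helper named in the first fix, while vf_aware_pasid_cap() below is only an
illustrative helper, not the actual patch):

    #include <linux/pci.h>

    /*
     * Sketch of the !CONFIG_PCI build fix: to_pci_sysdata() is defined
     * unconditionally in arch/x86/include/asm/pci.h so that the helpers
     * built on top of it keep compiling when PCI support is disabled.
     */
    static inline struct pci_sysdata *to_pci_sysdata(const struct pci_bus *bus)
    {
            return bus->sysdata;
    }

    /*
     * Sketch of the "PF PASID for VFs" idea: per the SR-IOV rules a VF has
     * no PASID capability of its own, so capability lookups are redirected
     * to the physical function.
     */
    static int vf_aware_pasid_cap(struct pci_dev *pdev)
    {
            if (pdev->is_virtfn)
                    pdev = pci_physfn(pdev);
            return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
    }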

1035 files changed:
.mailmap
Documentation/ABI/testing/rtc-cdev
Documentation/ABI/testing/sysfs-driver-pciback
Documentation/ABI/testing/sysfs-driver-xen-blkback
Documentation/admin-guide/bootconfig.rst [new file with mode: 0644]
Documentation/admin-guide/index.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/arm/fsl.yaml
Documentation/devicetree/bindings/arm/qcom.yaml
Documentation/devicetree/bindings/clock/amlogic,meson8-ddr-clkc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
Documentation/devicetree/bindings/clock/fsl,plldig.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/imx8mn-clock.yaml
Documentation/devicetree/bindings/clock/imx8mp-clock.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
Documentation/devicetree/bindings/clock/qcom,dispcc.txt [deleted file]
Documentation/devicetree/bindings/clock/qcom,dispcc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,gcc.yaml
Documentation/devicetree/bindings/clock/qcom,gpucc.txt [deleted file]
Documentation/devicetree/bindings/clock/qcom,gpucc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,mmcc.txt [deleted file]
Documentation/devicetree/bindings/clock/qcom,mmcc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
Documentation/devicetree/bindings/clock/qcom,videocc.txt [deleted file]
Documentation/devicetree/bindings/clock/qcom,videocc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
Documentation/devicetree/bindings/clock/ti-clkctrl.txt
Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
Documentation/devicetree/bindings/input/gpio-vibrator.yaml
Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
Documentation/devicetree/bindings/input/touchscreen/goodix.txt [deleted file]
Documentation/devicetree/bindings/input/touchscreen/goodix.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iommu/iommu.txt
Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
Documentation/devicetree/bindings/pwm/mxs-pwm.txt
Documentation/devicetree/bindings/remoteproc/mtk,scp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt [deleted file]
Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/spi/fsl-spi.txt
Documentation/driver-api/dmaengine/client.rst
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
Documentation/memory-barriers.txt
Documentation/networking/nf_flowtable.txt
Documentation/powerpc/imc.rst [new file with mode: 0644]
Documentation/powerpc/index.rst
Documentation/powerpc/papr_hcalls.rst [new file with mode: 0644]
Documentation/trace/boottime-trace.rst [new file with mode: 0644]
Documentation/trace/events.rst
Documentation/trace/index.rst
Documentation/trace/kprobetrace.rst
MAINTAINERS
arch/Kconfig
arch/alpha/kernel/srm_env.c
arch/arc/include/asm/pgtable.h
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/dra7-evm-common.dtsi
arch/arm/boot/dts/dra72-evm-common.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable-nommu.h
arch/arm/include/asm/tlb.h
arch/arm/kernel/atags_proc.c
arch/arm/kernel/stacktrace.c
arch/arm/kernel/traps.c
arch/arm/mm/alignment.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/ptdump.h
arch/arm64/mm/Makefile
arch/arm64/mm/dump.c
arch/arm64/mm/mmu.c
arch/arm64/mm/ptdump_debugfs.c
arch/csky/include/asm/Kbuild
arch/ia64/kernel/salinfo.c
arch/m68k/configs/amcore_defconfig
arch/m68k/configs/m5208evb_defconfig
arch/m68k/configs/m5249evb_defconfig
arch/m68k/configs/m5272c3_defconfig
arch/m68k/configs/m5275evb_defconfig
arch/m68k/configs/m5307c3_defconfig
arch/m68k/configs/m5407c3_defconfig
arch/m68k/configs/m5475evb_defconfig
arch/m68k/include/asm/uaccess_no.h
arch/m68k/kernel/bootinfo_proc.c
arch/microblaze/Kconfig
arch/microblaze/configs/mmu_defconfig
arch/microblaze/configs/nommu_defconfig
arch/microblaze/kernel/cpu/cache.c
arch/microblaze/kernel/cpu/cpuinfo.c
arch/microblaze/kernel/head.S
arch/microblaze/mm/init.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/pgtable.h
arch/mips/lasat/picvue_proc.c
arch/parisc/configs/712_defconfig [deleted file]
arch/parisc/configs/a500_defconfig [deleted file]
arch/parisc/configs/b180_defconfig [deleted file]
arch/parisc/configs/c3000_defconfig [deleted file]
arch/parisc/configs/c8000_defconfig [deleted file]
arch/parisc/configs/defconfig [deleted file]
arch/parisc/configs/generic-32bit_defconfig
arch/parisc/configs/generic-64bit_defconfig
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/Makefile.postlink
arch/powerpc/boot/4xx.c
arch/powerpc/boot/dts/mgcoge.dts
arch/powerpc/boot/dts/mpc832x_rdb.dts
arch/powerpc/boot/dts/mpc8610_hpcd.dts
arch/powerpc/configs/44x/akebono_defconfig
arch/powerpc/configs/44x/sam440ep_defconfig
arch/powerpc/configs/52xx/pcm030_defconfig
arch/powerpc/configs/83xx/kmeter1_defconfig
arch/powerpc/configs/adder875_defconfig
arch/powerpc/configs/ep8248e_defconfig
arch/powerpc/configs/ep88xc_defconfig
arch/powerpc/configs/mgcoge_defconfig
arch/powerpc/configs/mpc512x_defconfig
arch/powerpc/configs/mpc885_ads_defconfig
arch/powerpc/configs/powernv_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/configs/skiroot_defconfig
arch/powerpc/configs/storcenter_defconfig
arch/powerpc/configs/tqm8xx_defconfig
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/32/pgtable.h
arch/powerpc/include/asm/book3s/64/kup-radix.h
arch/powerpc/include/asm/book3s/64/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/firmware.h
arch/powerpc/include/asm/hw_breakpoint.h
arch/powerpc/include/asm/kasan.h
arch/powerpc/include/asm/kup.h
arch/powerpc/include/asm/nohash/32/kup-8xx.h
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/pgalloc.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/pci.h
arch/powerpc/include/asm/pgtable.h
arch/powerpc/include/asm/pnv-pci.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/reg_8xx.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/tlb.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/include/asm/vdso_datapage.h
arch/powerpc/include/asm/xive.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/eeh_cache.c
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/eeh_sysfs.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/head_32.h
arch/powerpc/kernel/head_40x.S
arch/powerpc/kernel/head_8xx.S
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/hw_breakpoint.c
arch/powerpc/kernel/idle.c
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/idle_power4.S [deleted file]
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/pci-hotplug.c
arch/powerpc/kernel/pci_dn.c
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/proc_powerpc.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/rtas-proc.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/rtasd.c
arch/powerpc/kernel/setup.h
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vdso32/Makefile
arch/powerpc/kernel/vdso32/cacheflush.S
arch/powerpc/kernel/vdso32/datapage.S
arch/powerpc/kernel/vdso32/getcpu.S
arch/powerpc/kernel/vdso32/gettimeofday.S
arch/powerpc/kernel/vdso32/vdso32.lds.S
arch/powerpc/kernel/vector.S
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/mm/book3s32/hash_low.S
arch/powerpc/mm/book3s32/mmu.c
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/mm/book3s64/pgtable.c
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/powerpc/mm/book3s64/radix_tlb.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/kasan/kasan_init_32.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/mmu_decl.h
arch/powerpc/mm/nohash/8xx.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/ptdump/ptdump.c
arch/powerpc/oprofile/backtrace.c
arch/powerpc/perf/8xx-pmu.c
arch/powerpc/perf/callchain.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
arch/powerpc/platforms/83xx/km83xx.c
arch/powerpc/platforms/85xx/smp.c
arch/powerpc/platforms/85xx/twr_p102x.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/maple/setup.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/Kconfig
arch/powerpc/platforms/pseries/firmware.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/lparcfg.c
arch/powerpc/platforms/pseries/papr_scm.c
arch/powerpc/platforms/pseries/pci.c
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/platforms/pseries/scanlog.c
arch/powerpc/platforms/pseries/vio.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/tools/relocs_check.sh
arch/powerpc/xmon/dis-asm.h
arch/powerpc/xmon/xmon.c
arch/riscv/include/asm/Kbuild
arch/riscv/include/asm/pgtable-64.h
arch/riscv/include/asm/pgtable.h
arch/s390/Kconfig
arch/s390/crypto/paes_s390.c
arch/s390/include/asm/Kbuild
arch/s390/include/asm/kprobes.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pgtable.h
arch/s390/include/uapi/asm/pkey.h
arch/s390/include/uapi/asm/zcrypt.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/mcount.S
arch/s390/mm/hugetlbpage.c
arch/sh/mm/alignment.c
arch/sparc/Kconfig
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/tlb_64.h
arch/sparc/kernel/led.c
arch/um/drivers/mconsole_kern.c
arch/um/kernel/exitcode.c
arch/um/kernel/process.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/include/asm/Kbuild
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/tlb.h
arch/x86/kernel/cpu/mtrr/if.c
arch/x86/mm/Makefile
arch/x86/mm/debug_pagetables.c
arch/x86/mm/dump_pagetables.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/uv/tlb_uv.c
arch/xtensa/include/asm/Kbuild
arch/xtensa/platforms/iss/simdisk.c
block/bfq-cgroup.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
crypto/af_alg.c
drivers/acpi/arm64/iort.c
drivers/acpi/battery.c
drivers/acpi/proc.c
drivers/acpi/scan.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/pata_arasan_cf.c
drivers/ata/pata_pcmcia.c
drivers/base/memory.c
drivers/block/brd.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_worker.c
drivers/block/nbd.c
drivers/block/null_blk_main.c
drivers/block/rbd.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/char/hw_random/bcm2835-rng.c
drivers/char/hw_random/omap-rng.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/at91/clk-sam9x60-pll.c
drivers/clk/at91/sam9x60.c
drivers/clk/clk-asm9260.c
drivers/clk/clk-bm1880.c
drivers/clk/clk-composite.c
drivers/clk/clk-divider.c
drivers/clk/clk-fixed-rate.c
drivers/clk/clk-fsl-sai.c [new file with mode: 0644]
drivers/clk/clk-gate.c
drivers/clk/clk-gpio.c
drivers/clk/clk-mux.c
drivers/clk/clk-plldig.c [new file with mode: 0644]
drivers/clk/clk-qoriq.c
drivers/clk/clk.c
drivers/clk/imx/Kconfig
drivers/clk/imx/Makefile
drivers/clk/imx/clk-composite-7ulp.c
drivers/clk/imx/clk-composite-8m.c
drivers/clk/imx/clk-divider-gate.c
drivers/clk/imx/clk-frac-pll.c
drivers/clk/imx/clk-imx6q.c
drivers/clk/imx/clk-imx7ulp.c
drivers/clk/imx/clk-imx8mm.c
drivers/clk/imx/clk-imx8mn.c
drivers/clk/imx/clk-imx8mp.c [new file with mode: 0644]
drivers/clk/imx/clk-imx8mq.c
drivers/clk/imx/clk-imx8qxp-lpcg.c
drivers/clk/imx/clk-pfdv2.c
drivers/clk/imx/clk-pll14xx.c
drivers/clk/imx/clk-pllv1.c
drivers/clk/imx/clk-pllv2.c
drivers/clk/imx/clk-pllv4.c
drivers/clk/imx/clk-sccg-pll.c [deleted file]
drivers/clk/imx/clk-sscg-pll.c [new file with mode: 0644]
drivers/clk/imx/clk.c
drivers/clk/imx/clk.h
drivers/clk/mediatek/Kconfig
drivers/clk/meson/Makefile
drivers/clk/meson/clk-mpll.c
drivers/clk/meson/clk-phase.c
drivers/clk/meson/clk-pll.c
drivers/clk/meson/g12a.c
drivers/clk/meson/meson8-ddr.c [new file with mode: 0644]
drivers/clk/meson/meson8b.c
drivers/clk/meson/sclk-div.c
drivers/clk/microchip/clk-core.c
drivers/clk/mmp/clk-frac.c
drivers/clk/mmp/clk-mix.c
drivers/clk/mvebu/Kconfig
drivers/clk/qcom/Kconfig
drivers/clk/qcom/Makefile
drivers/clk/qcom/apcs-msm8916.c
drivers/clk/qcom/clk-alpha-pll.c
drivers/clk/qcom/clk-alpha-pll.h
drivers/clk/qcom/clk-hfpll.c
drivers/clk/qcom/clk-rcg.h
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-rpmh.c
drivers/clk/qcom/clk-smd-rpm.c
drivers/clk/qcom/dispcc-sc7180.c [new file with mode: 0644]
drivers/clk/qcom/dispcc-sdm845.c
drivers/clk/qcom/gcc-ipq6018.c [new file with mode: 0644]
drivers/clk/qcom/gcc-msm8996.c
drivers/clk/qcom/gcc-msm8998.c
drivers/clk/qcom/gcc-qcs404.c
drivers/clk/qcom/gpucc-sc7180.c [new file with mode: 0644]
drivers/clk/qcom/hfpll.c
drivers/clk/qcom/mmcc-msm8974.c
drivers/clk/qcom/mmcc-msm8998.c [new file with mode: 0644]
drivers/clk/qcom/videocc-sc7180.c [new file with mode: 0644]
drivers/clk/renesas/Kconfig
drivers/clk/renesas/r7s9210-cpg-mssr.c
drivers/clk/renesas/rcar-gen2-cpg.h
drivers/clk/renesas/rcar-gen3-cpg.c
drivers/clk/rockchip/clk-pll.c
drivers/clk/sunxi-ng/ccu-sun50i-a64.c
drivers/clk/sunxi-ng/ccu-sun50i-a64.h
drivers/clk/sunxi-ng/ccu-sun6i-a31.h
drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
drivers/clk/sunxi-ng/ccu-sun8i-r40.h
drivers/clk/sunxi/clk-sun6i-apb0-gates.c
drivers/clk/tegra/clk-dfll.c
drivers/clk/tegra/clk-divider.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra20.c
drivers/clk/tegra/clk-tegra30.c
drivers/clk/ti/clk-54xx.c
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/clk.c
drivers/clk/ti/clkctrl.c
drivers/clk/ti/clock.h
drivers/clk/ti/clockdomain.c
drivers/clk/uniphier/clk-uniphier-peri.c
drivers/clk/ux500/u8500_of_clk.c
drivers/clk/versatile/Kconfig
drivers/clk/zynqmp/clkc.c
drivers/clk/zynqmp/divider.c
drivers/clk/zynqmp/pll.c
drivers/dma/dmaengine.c
drivers/dma/idxd/sysfs.c
drivers/dma/mv_xor_v2.c
drivers/firmware/efi/arm-runtime.c
drivers/firmware/iscsi_ibft.c
drivers/firmware/xilinx/zynqmp.c
drivers/gpio/gpiolib-devres.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwspinlock/omap_hwspinlock.c
drivers/hwspinlock/qcom_hwspinlock.c
drivers/hwspinlock/sirf_hwspinlock.c
drivers/hwspinlock/stm32_hwspinlock.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-synquacer.c
drivers/ide/ide-proc.c
drivers/iio/accel/cros_ec_accel_legacy.c
drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
drivers/iio/light/cros_ec_light_prox.c
drivers/iio/pressure/cros_ec_baro.c
drivers/input/input.c
drivers/input/misc/axp20x-pek.c
drivers/input/rmi4/rmi_f11.c
drivers/input/serio/Kconfig
drivers/input/serio/Makefile
drivers/input/serio/apbps2.c
drivers/input/serio/ioc3kbd.c [new file with mode: 0644]
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/elants_i2c.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu-impl.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/arm-smmu.h
drivers/iommu/dmar.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.c
drivers/iommu/intel-pasid.h
drivers/iommu/intel-svm.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable.c
drivers/iommu/iommu-sysfs.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/of_iommu.c
drivers/iommu/qcom_iommu.c
drivers/iommu/virtio-iommu.c
drivers/isdn/capi/kcapi_proc.c
drivers/macintosh/Kconfig
drivers/macintosh/via-pmu.c
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.h
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/bcache/stats.c
drivers/md/bcache/super.c
drivers/md/bcache/sysfs.c
drivers/md/md.c
drivers/media/platform/cros-ec-cec/cros-ec-cec.c
drivers/mfd/cros_ec_dev.c
drivers/misc/ocxl/Kconfig
drivers/misc/sgi-gru/gruprocfs.c
drivers/mtd/ubi/build.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/pensando/ionic/ionic_if.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/gtp.c
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/bus.c
drivers/net/netdevsim/dev.c
drivers/net/netdevsim/health.c
drivers/net/netdevsim/netdevsim.h
drivers/net/netdevsim/sdev.c [deleted file]
drivers/net/phy/at803x.c
drivers/net/phy/mdio-mux-meson-g12a.c
drivers/net/phy/mii_timestamper.c
drivers/net/usb/r8152.c
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/ipw2x00/libipw_module.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/intersil/hostap/hostap_proc.c
drivers/net/wireless/intersil/hostap/hostap_wlan.h
drivers/net/wireless/ray_cs.c
drivers/nvme/host/pci.c
drivers/nvme/target/core.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/io-cmd-bdev.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/nvmet.h
drivers/of/device.c
drivers/of/of_mdio.c
drivers/oprofile/cpu_buffer.c
drivers/parisc/led.c
drivers/pci/ats.c
drivers/pci/controller/pci-tegra.c
drivers/pci/hotplug/pnv_php.c
drivers/pci/pci.c
drivers/pci/proc.c
drivers/phy/phy-core.c
drivers/pinctrl/pxa/pinctrl-pxa2xx.c
drivers/platform/chrome/chromeos_laptop.c
drivers/platform/chrome/cros_ec.c
drivers/platform/chrome/cros_ec.h [new file with mode: 0644]
drivers/platform/chrome/cros_ec_chardev.c
drivers/platform/chrome/cros_ec_debugfs.c
drivers/platform/chrome/cros_ec_i2c.c
drivers/platform/chrome/cros_ec_ishtp.c
drivers/platform/chrome/cros_ec_lightbar.c
drivers/platform/chrome/cros_ec_lpc.c
drivers/platform/chrome/cros_ec_proto.c
drivers/platform/chrome/cros_ec_rpmsg.c
drivers/platform/chrome/cros_ec_sensorhub.c
drivers/platform/chrome/cros_ec_spi.c
drivers/platform/chrome/cros_ec_sysfs.c
drivers/platform/chrome/cros_ec_trace.c
drivers/platform/chrome/cros_ec_trace.h
drivers/platform/chrome/cros_ec_vbc.c
drivers/platform/chrome/cros_usbpd_logger.c
drivers/platform/chrome/wilco_ec/Kconfig
drivers/platform/chrome/wilco_ec/core.c
drivers/platform/chrome/wilco_ec/keyboard_leds.c
drivers/platform/chrome/wilco_ec/mailbox.c
drivers/platform/chrome/wilco_ec/telemetry.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/pnp/isapnp/proc.c
drivers/pnp/pnpbios/proc.c
drivers/power/supply/cros_usbpd-charger.c
drivers/pwm/Kconfig
drivers/pwm/core.c
drivers/pwm/pwm-atmel.c
drivers/pwm/pwm-cros-ec.c
drivers/pwm/pwm-imx27.c
drivers/pwm/pwm-mxs.c
drivers/pwm/pwm-omap-dmtimer.c
drivers/pwm/pwm-pca9685.c
drivers/pwm/pwm-rcar.c
drivers/pwm/pwm-stm32.c
drivers/pwm/pwm-sun4i.c
drivers/remoteproc/Kconfig
drivers/remoteproc/Makefile
drivers/remoteproc/mtk_common.h [new file with mode: 0644]
drivers/remoteproc/mtk_scp.c [new file with mode: 0644]
drivers/remoteproc/mtk_scp_ipi.c [new file with mode: 0644]
drivers/remoteproc/qcom_q6v5_mss.c
drivers/remoteproc/qcom_q6v5_pas.c
drivers/remoteproc/qcom_sysmon.c
drivers/remoteproc/remoteproc_core.c
drivers/rpmsg/Kconfig
drivers/rpmsg/Makefile
drivers/rpmsg/mtk_rpmsg.c [new file with mode: 0644]
drivers/rtc/Kconfig
drivers/rtc/rtc-abx80x.c
drivers/rtc/rtc-asm9260.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-at91rm9200.h [deleted file]
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-cros-ec.c
drivers/rtc/rtc-ds1343.c
drivers/rtc/rtc-hym8563.c
drivers/rtc/rtc-moxart.c
drivers/rtc/rtc-mt6397.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-pcf2127.c
drivers/rtc/rtc-pcf85063.c
drivers/rtc/rtc-pcf8523.c
drivers/rtc/rtc-pcf8563.c
drivers/rtc/rtc-rv3028.c
drivers/rtc/rtc-rv3029c2.c
drivers/rtc/rtc-rv8803.c
drivers/rtc/rtc-rx8010.c
drivers/rtc/rtc-rx8025.c
drivers/rtc/rtc-stm32.c
drivers/rtc/rtc-tps6586x.c
drivers/rtc/rtc-zynqmp.c
drivers/s390/block/dasd_proc.c
drivers/s390/cio/blacklist.c
drivers/s390/cio/css.c
drivers/s390/crypto/Makefile
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_api.h
drivers/s390/crypto/zcrypt_ccamisc.h
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_ep11misc.c [new file with mode: 0644]
drivers/s390/crypto/zcrypt_ep11misc.h [new file with mode: 0644]
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_proc.c
drivers/scsi/sg.c
drivers/spi/spi-orion.c
drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
drivers/tty/sysrq.c
drivers/usb/gadget/function/rndis.c
drivers/vfio/mdev/mdev_sysfs.c
drivers/vfio/pci/vfio_pci_nvlink2.c
drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/via/viafbdev.c
drivers/xen/gntdev.c
drivers/xen/xen-balloon.c
drivers/xen/xen-pciback/conf_space.c
drivers/xen/xen-pciback/conf_space.h
drivers/xen/xen-pciback/conf_space_capability.c
drivers/xen/xen-pciback/conf_space_header.c
drivers/xen/xen-pciback/pci_stub.c
drivers/xen/xen-pciback/pciback.h
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe_backend.c
drivers/zorro/proc.c
fs/aio.c
fs/attr.c
fs/ceph/Makefile
fs/ceph/acl.c
fs/ceph/caps.c
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/mdsmap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/util.c [new file with mode: 0644]
fs/ceph/xattr.c
fs/cifs/cifs_debug.c
fs/cifs/dfs_cache.c
fs/cifs/dfs_cache.h
fs/cifs/inode.c
fs/configfs/inode.c
fs/debugfs/inode.c
fs/eventfd.c
fs/ext4/super.c
fs/f2fs/file.c
fs/f2fs/node.c
fs/fat/misc.c
fs/fscache/internal.h
fs/fscache/object-list.c
fs/fscache/proc.c
fs/inode.c
fs/io_uring.c
fs/jbd2/journal.c
fs/jfs/jfs_debug.c
fs/jfs/jfs_dmap.c
fs/kernfs/inode.c
fs/libfs.c
fs/lockd/procfs.c
fs/nfsd/nfsctl.c
fs/nfsd/stats.c
fs/ntfs/inode.c
fs/ocfs2/file.c
fs/ocfs2/suballoc.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/Makefile
fs/proc/bootconfig.c [new file with mode: 0644]
fs/proc/cpuinfo.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/kcore.c
fs/proc/kmsg.c
fs/proc/page.c
fs/proc/proc_net.c
fs/proc/proc_sysctl.c
fs/proc/root.c
fs/proc/stat.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/read_write.c
fs/sysfs/group.c
fs/tracefs/inode.c
fs/ubifs/file.c
fs/ubifs/sb.c
fs/utimes.c
fs/xfs/libxfs/xfs_ag.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_attr_remote.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_btree.h
fs/xfs/libxfs/xfs_da_btree.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/libxfs/xfs_sb.c
fs/xfs/scrub/agheader_repair.c
fs/xfs/scrub/fscounters.c
fs/xfs/scrub/repair.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_symlink.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_trans_buf.c
include/asm-generic/Kbuild
include/asm-generic/percpu.h
include/asm-generic/pgtable.h
include/asm-generic/tlb.h
include/dt-bindings/clk/ti-dra7-atl.h [deleted file]
include/dt-bindings/clock/dra7.h
include/dt-bindings/clock/imx8mp-clock.h [new file with mode: 0644]
include/dt-bindings/clock/meson8-ddr-clkc.h [new file with mode: 0644]
include/dt-bindings/clock/omap5.h
include/dt-bindings/clock/qcom,dispcc-sc7180.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,dispcc-sdm845.h
include/dt-bindings/clock/qcom,gcc-ipq6018.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,gcc-msm8998.h
include/dt-bindings/clock/qcom,gpucc-sc7180.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,mmcc-msm8998.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,videocc-sc7180.h [new file with mode: 0644]
include/dt-bindings/clock/sun50i-a64-ccu.h
include/dt-bindings/clock/sun6i-a31-ccu.h
include/dt-bindings/clock/sun8i-a23-a33-ccu.h
include/dt-bindings/clock/sun8i-r40-ccu.h
include/dt-bindings/clock/ti-dra7-atl.h [new file with mode: 0644]
include/dt-bindings/clock/xlnx-versal-clk.h [new file with mode: 0644]
include/dt-bindings/reset/qcom,gcc-ipq6018.h [new file with mode: 0644]
include/linux/bitmap.h
include/linux/bitops.h
include/linux/bootconfig.h [new file with mode: 0644]
include/linux/ceph/mdsmap.h
include/linux/ceph/osd_client.h
include/linux/ceph/rados.h
include/linux/clk-provider.h
include/linux/clk.h
include/linux/cpumask.h
include/linux/debugfs.h
include/linux/eventfd.h
include/linux/firmware/xlnx-zynqmp.h
include/linux/fs.h
include/linux/intel-iommu.h
include/linux/io-pgtable.h
include/linux/iommu.h
include/linux/libata.h
include/linux/memory_hotplug.h
include/linux/mfd/cros_ec.h [deleted file]
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/pagewalk.h
include/linux/pci-ats.h
include/linux/percpu-defs.h
include/linux/perf_event.h
include/linux/platform_data/cros_ec_proto.h
include/linux/proc_fs.h
include/linux/ptdump.h [new file with mode: 0644]
include/linux/remoteproc/mtk_scp.h [new file with mode: 0644]
include/linux/ring_buffer.h
include/linux/rpmsg/mtk_rpmsg.h [new file with mode: 0644]
include/linux/rtc.h
include/linux/seq_file.h
include/linux/slab.h
include/linux/string.h
include/linux/sunrpc/stats.h
include/linux/trace_events.h
include/linux/tracefs.h
include/sound/pcm.h
include/trace/events/intel_iommu.h
include/trace/events/pwm.h [new file with mode: 0644]
include/trace/trace_events.h
include/uapi/linux/rtc.h
include/uapi/sound/asound.h
include/xen/xenbus.h
init/Kconfig
init/main.c
ipc/mqueue.c
ipc/msg.c
ipc/sem.c
ipc/util.c
kernel/configs.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/irq/proc.c
kernel/kallsyms.c
kernel/latencytop.c
kernel/locking/lockdep_proc.c
kernel/module.c
kernel/profile.c
kernel/sched/psi.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/kprobe_event_gen_test.c [new file with mode: 0644]
kernel/trace/ring_buffer.c
kernel/trace/ring_buffer_benchmark.c
kernel/trace/synth_event_gen_test.c [new file with mode: 0644]
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c [new file with mode: 0644]
kernel/trace/trace_branch.c
kernel/trace/trace_dynevent.c
kernel/trace/trace_dynevent.h
kernel/trace/trace_entries.h
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_functions.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_kdb.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_mmiotrace.c
kernel/trace/trace_output.c
kernel/trace/trace_sched_switch.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c
kernel/trace/trace_seq.c
kernel/trace/trace_stat.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
lib/Kconfig
lib/Makefile
lib/bitmap.c
lib/bootconfig.c [new file with mode: 0644]
lib/string.c
lib/test_bitmap.c
mm/Kconfig.debug
mm/Makefile
mm/gup.c
mm/hmm.c
mm/memory.c
mm/memory_hotplug.c
mm/memremap.c
mm/migrate.c
mm/mincore.c
mm/mmu_gather.c
mm/page_alloc.c
mm/pagewalk.c
mm/ptdump.c [new file with mode: 0644]
mm/slab_common.c
mm/sparse.c
mm/swapfile.c
net/atm/mpoa_proc.c
net/atm/proc.c
net/ceph/Makefile
net/ceph/ceph_fs.c [deleted file]
net/ceph/osd_client.c
net/core/dev.c
net/core/filter.c
net/core/pktgen.c
net/hsr/hsr_slave.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/l2tp/l2tp_core.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/x_tables.c
net/netfilter/xt_recent.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/stats.c
net/xfrm/xfrm_policy.c
samples/kfifo/bytestream-example.c
samples/kfifo/inttype-example.c
samples/kfifo/record-example.c
scripts/coccinelle/free/devm_free.cocci
security/smack/smack_lsm.c
sound/core/info.c
sound/core/pcm_compat.c
sound/core/pcm_native.c
sound/drivers/dummy.c
sound/hda/hdac_stream.c
sound/pci/emu10k1/emufx.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdsp.c
sound/soc/amd/raven/acp3x-i2s.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ak4104.c
sound/soc/codecs/cs4270.c
sound/soc/codecs/max98090.c
sound/soc/codecs/rt1015.c
sound/soc/codecs/rt1308-sdw.c
sound/soc/codecs/rt700-sdw.c
sound/soc/codecs/rt711-sdw.c
sound/soc/codecs/rt715-sdw.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/intel/boards/bxt_da7219_max98357a.c
sound/soc/intel/boards/bxt_rt298.c
sound/soc/intel/boards/cml_rt1011_rt5682.c
sound/soc/intel/boards/glk_rt5682_max98357a.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/intel/common/soc-acpi-intel-icl-match.c
sound/soc/sof/core.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/hda.h
sound/soc/sof/pcm.c
sound/soc/sof/pm.c
sound/soc/sof/sof-pci-dev.c
sound/soc/sof/trace.c
sound/soc/sunxi/sun4i-spdif.c
sound/soc/tegra/tegra30_i2s.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/validate.c
tools/Makefile
tools/bootconfig/.gitignore [new file with mode: 0644]
tools/bootconfig/Makefile [new file with mode: 0644]
tools/bootconfig/include/linux/bootconfig.h [new file with mode: 0644]
tools/bootconfig/include/linux/bug.h [new file with mode: 0644]
tools/bootconfig/include/linux/ctype.h [new file with mode: 0644]
tools/bootconfig/include/linux/errno.h [new file with mode: 0644]
tools/bootconfig/include/linux/kernel.h [new file with mode: 0644]
tools/bootconfig/include/linux/printk.h [new file with mode: 0644]
tools/bootconfig/include/linux/string.h [new file with mode: 0644]
tools/bootconfig/main.c [new file with mode: 0644]
tools/bootconfig/samples/bad-array-space-comment.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-array.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-dotword.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-empty.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-keyerror.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-longkey.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-manywords.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-no-keyword.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-nonprintable.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-spaceword.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-tree.bconf [new file with mode: 0644]
tools/bootconfig/samples/bad-value.bconf [new file with mode: 0644]
tools/bootconfig/samples/escaped.bconf [new file with mode: 0644]
tools/bootconfig/samples/good-array-space-comment.bconf [new file with mode: 0644]
tools/bootconfig/samples/good-comment-after-value.bconf [new file with mode: 0644]
tools/bootconfig/samples/good-printables.bconf [new file with mode: 0644]
tools/bootconfig/samples/good-simple.bconf [new file with mode: 0644]
tools/bootconfig/samples/good-single.bconf [new file with mode: 0644]
tools/bootconfig/samples/good-space-after-value.bconf [new file with mode: 0644]
tools/bootconfig/samples/good-tree.bconf [new file with mode: 0644]
tools/bootconfig/test-bootconfig.sh [new file with mode: 0755]
tools/include/linux/bitops.h
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc [new file with mode: 0644]
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/fin_ack_lat.c [new file with mode: 0644]
tools/testing/selftests/net/fin_ack_lat.sh [new file with mode: 0755]
tools/testing/selftests/powerpc/eeh/eeh-functions.sh
tools/testing/selftests/powerpc/mm/.gitignore
tools/testing/selftests/powerpc/mm/Makefile
tools/testing/selftests/powerpc/mm/bad_accesses.c [new file with mode: 0644]
tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
tools/testing/selftests/tc-testing/tc-tests/filters/basic.json

index de36dce..a675b67 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -18,6 +18,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
 Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
 Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
+Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
index 9744728..25910c3 100644 (file)
@@ -33,6 +33,14 @@ Description:
                  Requires a separate RTC_PIE_ON call to enable the periodic
                  interrupts.
 
+               * RTC_VL_READ: Read the voltage inputs status of the RTC when
+                 supported. The value is a bit field of RTC_VL_*, giving the
+                 status of the main and backup voltages.
+
+               * RTC_VL_CLEAR: Clear the voltage status of the RTC. Some RTCs
+                 need user interaction when the backup power provider is
+                 replaced or charged to be able to clear the status.
+
                The ioctl() calls supported by the older /dev/rtc interface are
                also supported by the newer RTC class framework. However,
                because the chips and systems are not standardized, some PC/AT
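
The RTC_VL_READ/RTC_VL_CLEAR ioctls described above can be exercised from user
space roughly as follows (a minimal sketch assuming the usual /dev/rtc0 node;
error handling trimmed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>

    int main(void)
    {
            unsigned int vl = 0;
            int fd = open("/dev/rtc0", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, RTC_VL_READ, &vl) == 0)
                    printf("voltage status: %#x\n", vl); /* bit field of RTC_VL_* */
            if (vl)
                    ioctl(fd, RTC_VL_CLEAR, 0);          /* clear the latched status */
            return 0;
    }
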
index 6a733bf..73308c2 100644 (file)
@@ -11,3 +11,16 @@ Description:
                 #echo 00:19.0-E0:2:FF > /sys/bus/pci/drivers/pciback/quirks
                 will allow the guest to read and write to the configuration
                 register 0x0E.
+
+What:           /sys/bus/pci/drivers/pciback/allow_interrupt_control
+Date:           Jan 2020
+KernelVersion:  5.6
+Contact:        xen-devel@lists.xenproject.org
+Description:
+                List of devices which can have the interrupt control flags
+                (INTx, MSI, MSI-X) set by a connected guest. It is meant to be
+                set only when the guest is a stubdomain hosting a device model
+                (qemu) and the actual device is assigned to an HVM. It is not
+                safe (similarly to the permissive attribute) to set this for a
+                device assigned to a PV guest. The device is automatically
+                removed from this list when the connected pcifront terminates.
index 4e7babb..ecb7942 100644 (file)
@@ -25,3 +25,13 @@ Description:
                 allocated without being in use. The time is in
                 seconds, 0 means indefinitely long.
                 The default is 60 seconds.
+
+What:           /sys/module/xen_blkback/parameters/buffer_squeeze_duration_ms
+Date:           December 2019
+KernelVersion:  5.6
+Contact:        SeongJae Park <sjpark@amazon.de>
+Description:
+                When memory pressure is reported to blkback this option
+                controls the duration in milliseconds that blkback will not
+                cache any page not backed by a grant mapping.
+                The default is 10ms.
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
new file mode 100644 (file)
index 0000000..b342a67
--- /dev/null
@@ -0,0 +1,190 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _bootconfig:
+
+==================
+Boot Configuration
+==================
+
+:Author: Masami Hiramatsu <mhiramat@kernel.org>
+
+Overview
+========
+
+The boot configuration expands the kernel command line with additional
+key-value data passed to the kernel at boot time in an efficient way.
+This allows administrators to pass a structured-key config file.
+
+Config File Syntax
+==================
+
+The boot config syntax is a simple structured key-value format. Each key
+consists of dot-connected words, and the key and value are connected by ``=``.
+The value has to be terminated by a semicolon (``;``) or a newline (``\n``).
+For an array value, the entries are separated by commas (``,``). ::
+
+ KEY[.WORD[...]] = VALUE[, VALUE2[...]][;]
+
+Unlike the kernel command line syntax, spaces are OK around the comma and ``=``.
+
+Each word in a key must contain only letters, digits, dashes (``-``) or
+underscores (``_``). Each value may contain only printable characters or
+spaces, except for the delimiters: semicolon (``;``), newline (``\n``),
+comma (``,``), hash (``#``) and closing brace (``}``).
+
+If you want to use those delimiters in a value, you can quote it with either
+double quotes (``"VALUE"``) or single quotes (``'VALUE'``). Note that you
+cannot escape these quotes.
+
+A key may also have no value, or an empty value. Such keys are used to check
+whether the key exists (like a boolean flag).
+
+Key-Value Syntax
+----------------
+
+The boot config file syntax allows keys that share leading words to be
+merged with braces. For example::
+
+ foo.bar.baz = value1
+ foo.bar.qux.quux = value2
+
+These can also be written as::
+
+ foo.bar {
+    baz = value1
+    qux.quux = value2
+ }
+
+Or, even shorter, written as follows::
+
+ foo.bar { baz = value1; qux.quux = value2 }
+
+In both styles, identical key words are automatically merged when the file is
+parsed at boot time, so you can append similar trees or key-values.
+
+Comments
+--------
+
+The config syntax accepts shell-script style comments. Everything from a hash
+("#") to the end of the line ("\n") is ignored.
+
+::
+
+ # comment line
+ foo = value # value is set to foo.
+ bar = 1, # 1st element
+       2, # 2nd element
+       3  # 3rd element
+
+This is parsed as below::
+
+ foo = value
+ bar = 1, 2, 3
+
+Note that you cannot put a comment between a value and its delimiter (``,`` or
+``;``). This means the following config has a syntax error ::
+
+ key = 1 # comment
+       ,2
+
+
+/proc/bootconfig
+================
+
+/proc/bootconfig is the user-space interface to the boot config.
+Unlike /proc/cmdline, this file shows the configuration as a key-value list.
+Each key-value pair is shown on its own line in the following style::
+
+ KEY[.WORDS...] = "[VALUE]"[,"VALUE2"...]
+
+
+Boot Kernel With a Boot Config
+==============================
+
+The boot configuration file is loaded with the initrd: it is appended to the
+end of the initrd (initramfs) image file, and the Linux kernel decodes the
+last part of the initrd image in memory to get the boot configuration
+data.
+Because of this "piggyback" method, there is no need to change or update
+the boot loader or the kernel image itself.
+
+For this operation, the Linux kernel provides the "bootconfig" command under
+tools/bootconfig, which allows an admin to apply the config file to, or delete
+it from, an initrd image. You can build it with the following command::
+
+ # make -C tools/bootconfig
+
+To add your boot config file to an initrd image, run bootconfig as below
+(old config data is removed automatically if it exists)::
+
+ # tools/bootconfig/bootconfig -a your-config /boot/initrd.img-X.Y.Z
+
+To remove the config from the image, you can use the -d option as below::
+
+ # tools/bootconfig/bootconfig -d /boot/initrd.img-X.Y.Z
+
+Then add "bootconfig" on the normal kernel command line to tell the
+kernel to look for the bootconfig at the end of the initrd file.
+
+Config File Limitation
+======================
+
+Currently the maximum config size is 32KB and the total number of key words
+(not key-value entries) must be under 1024 nodes.
+Note: this limit is on nodes, not entries; an entry consumes at least two
+nodes (a key word and a value), so theoretically up to 512 key-value pairs
+fit. If each key contains three words on average, about 256 key-value pairs
+fit. In most cases the number of config items will stay under 100 entries
+and 8KB, so this should be enough.
+If the number of nodes exceeds 1024, the parser returns an error even if the
+file size is smaller than 32KB.
+Since the bootconfig command verifies the file when appending it to the
+initrd image, the user will notice such problems before boot.
+
+
+Bootconfig APIs
+===============
+
+Users can query or loop over key-value pairs; it is also possible to find a
+root (prefix) key node and look up the key-values under that node.
+
+If you have a key string, you can query the value directly with the key
+using xbc_find_value(). If you want to know what keys exist in the boot
+config, you can use xbc_for_each_key_value() to iterate key-value pairs.
+Note that you need to use xbc_array_for_each_value() for accessing
+each array's value, e.g.::
+
+ vnode = NULL;
+ xbc_find_value("key.word", &vnode);
+ if (vnode && xbc_node_is_array(vnode))
+    xbc_array_for_each_value(vnode, value) {
+      printk("%s ", value);
+    }
+
+If you want to focus on keys which have a prefix string, you can use
+xbc_find_node() to find a node by the prefix string, and iterate
+keys under the prefix node with xbc_node_for_each_key_value().
+
+But the most typical usage is to get a named value or a named array under a
+given prefix, as below::
+
+ root = xbc_find_node("key.prefix");
+ value = xbc_node_find_value(root, "option", &vnode);
+ ...
+ xbc_node_for_each_array_value(root, "array-option", value, anode) {
+    ...
+ }
+
+This accesses a value of "key.prefix.option" and an array of
+"key.prefix.array-option".
+
+Locking is not needed, since the config becomes read-only after
+initialization. All data and keys must be copied if you need to modify them.
+
+
+Functions and structures
+========================
+
+.. kernel-doc:: include/linux/bootconfig.h
+.. kernel-doc:: lib/bootconfig.c
+
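
As a complement to the API examples in the document added above, iterating over
every key-value pair with xbc_for_each_key_value() could look roughly like this
(a hedged sketch; xbc_node_compose_key() and XBC_KEYLEN_MAX are assumed to be
provided by include/linux/bootconfig.h):

    #include <linux/bootconfig.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    static void __init dump_bootconfig(void)
    {
            struct xbc_node *leaf;
            const char *val;
            char key[XBC_KEYLEN_MAX];

            xbc_for_each_key_value(leaf, val) {
                    /* Build the dotted key string for this leaf node. */
                    if (xbc_node_compose_key(leaf, key, sizeof(key)) < 0)
                            continue;
                    pr_info("%s = \"%s\"\n", key, val ? val : "");
            }
    }
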
index 4433f39..f1d0ccf 100644 (file)
@@ -64,6 +64,7 @@ configure specific aspects of kernel behavior to your liking.
    binderfs
    binfmt-misc
    blockdev/index
+   bootconfig
    braille-console
    btmrvl
    cgroup-v1/index
index ddc5ccd..dbc22d6 100644 (file)
                        no delay (0).
                        Format: integer
 
+       bootconfig      [KNL]
+                       Tell the kernel to look for an extended command line
+                       (boot config) appended to the initrd and apply it.
+
+                       See Documentation/admin-guide/bootconfig.rst
+
        bert_disable    [ACPI]
                        Disable BERT OS support on buggy BIOSes.
 
index f79683a..b0a7454 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/arm/fsl.yaml#
+$id: http://devicetree.org/schemas/arm/fsl.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Freescale i.MX Platforms Device Tree Bindings
index e39d8f0..b5bef5a 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/arm/qcom.yaml#
+$id: http://devicetree.org/schemas/arm/qcom.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: QCOM device tree bindings
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8-ddr-clkc.yaml b/Documentation/devicetree/bindings/clock/amlogic,meson8-ddr-clkc.yaml
new file mode 100644 (file)
index 0000000..4b8669f
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/amlogic,meson8-ddr-clkc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic DDR Clock Controller Device Tree Bindings
+
+maintainers:
+  - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+properties:
+  compatible:
+    enum:
+      - amlogic,meson8-ddr-clkc
+      - amlogic,meson8b-ddr-clkc
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: xtal
+
+  "#clock-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    ddr_clkc: clock-controller@400 {
+      compatible = "amlogic,meson8-ddr-clkc";
+      reg = <0x400 0x20>;
+      clocks = <&xtal>;
+      clock-names = "xtal";
+      #clock-cells = <1>;
+    };
+
+...
index 4d94091..cc51e47 100644 (file)
@@ -11,6 +11,11 @@ Required Properties:
        - "amlogic,meson8m2-clkc" for Meson8m2 (S812) SoCs
 - #clock-cells: should be 1.
 - #reset-cells: should be 1.
+- clocks: list of clock phandles, one for each entry in clock-names
+- clock-names: should contain the following:
+  * "xtal": the 24MHz system oscillator
+  * "ddr_pll": the DDR PLL clock
+  * "clk_32k": (if present) the 32kHz clock signal from GPIOAO_6 (CLK_32K_IN)
 
 Parent node should have the following properties :
 - compatible: "amlogic,meson-hhi-sysctrl", "simple-mfd", "syscon"
index e638273..8559fe8 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/bitmain,bm1880-clk.yaml#
+$id: http://devicetree.org/schemas/clock/bitmain,bm1880-clk.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Bitmain BM1880 Clock Controller
diff --git a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
new file mode 100644 (file)
index 0000000..c835003
--- /dev/null
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/fsl,plldig.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP QorIQ Layerscape LS1028A Display PIXEL Clock Binding
+
+maintainers:
+  - Wen He <wen.he_1@nxp.com>
+
+description: |
+  NXP LS1028A has a clock domain PXLCLK0 used for the display output
+  interface in the display core, implemented as a TSMC CLN28HPM PLL,
+  which generates and supplies pixel clocks to the display.
+
+properties:
+  compatible:
+    const: fsl,ls1028a-plldig
+
+  reg:
+    maxItems: 1
+
+  '#clock-cells':
+    const: 0
+
+  fsl,vco-hz:
+     description: Optional VCO frequency of the PLL in Hertz. The VCO
+        frequency of this PLL cannot be changed during runtime, only at
+        startup. Therefore, the output frequencies are very limited and
+        might not even closely match the requested frequency. To work
+        around this restriction the user may specify their own desired
+        VCO frequency for the PLL.
+     minimum: 650000000
+     maximum: 1300000000
+     default: 1188000000
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - '#clock-cells'
+
+examples:
+  # Display PIXEL Clock node:
+  - |
+    dpclk: clock-display@f1f0000 {
+        compatible = "fsl,ls1028a-plldig";
+        reg = <0x0 0xf1f0000 0x0 0xffff>;
+        #clock-cells = <0>;
+        clocks = <&osc_27m>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml b/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml
new file mode 100644 (file)
index 0000000..fc3bdfd
--- /dev/null
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/fsl,sai-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale SAI bitclock-as-a-clock binding
+
+maintainers:
+  - Michael Walle <michael@walle.cc>
+
+description: |
+  It is possible to use the BCLK pin of a SAI module as a generic clock
+  output. Some SoCs are very constrained in their pin multiplexer
+  configuration; e.g. pins can only be changed in groups. On the LS1028A SoC,
+  for example, SAIs can only be enabled in pairs, so if you use only one SAI,
+  the pins of the second one are wasted. With this binding, the clock of the
+  second SAI can be used as an MCLK clock for an audio codec, for example.
+
+  This is a composite of a gated clock and a divider clock.
+
+properties:
+  compatible:
+    const: fsl,vf610-sai-clock
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  '#clock-cells':
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    soc {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        mclk: clock-mclk@f130080 {
+            compatible = "fsl,vf610-sai-clock";
+            reg = <0x0 0xf130080 0x0 0x80>;
+            #clock-cells = <0>;
+            clocks = <&parentclk>;
+        };
+    };
index 622f365..cd0b8a3 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/imx8mn-clock.yaml#
+$id: http://devicetree.org/schemas/clock/imx8mn-clock.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: NXP i.MX8M Nano Clock Control Module Binding
diff --git a/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml b/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml
new file mode 100644 (file)
index 0000000..89aee63
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx8mp-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8M Plus Clock Control Module Binding
+
+maintainers:
+  - Anson Huang <Anson.Huang@nxp.com>
+
+description:
+  The NXP i.MX8M Plus clock control module is an integrated clock controller
+  which generates and supplies clocks to all modules.
+
+properties:
+  compatible:
+    const: fsl,imx8mp-ccm
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: 32k osc
+      - description: 24m osc
+      - description: ext1 clock input
+      - description: ext2 clock input
+      - description: ext3 clock input
+      - description: ext4 clock input
+
+  clock-names:
+    items:
+      - const: osc_32k
+      - const: osc_24m
+      - const: clk_ext1
+      - const: clk_ext2
+      - const: clk_ext3
+      - const: clk_ext4
+
+  '#clock-cells':
+    const: 1
+    description:
+      The clock consumer should specify the desired clock by having the clock
+      ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mp-clock.h
+      for the full list of i.MX8M Plus clock IDs.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+
+examples:
+  # Clock Control Module node:
+  - |
+    clk: clock-controller@30380000 {
+        compatible = "fsl,imx8mp-ccm";
+        reg = <0x30380000 0x10000>;
+        #clock-cells = <1>;
+        clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>,
+                 <&clk_ext2>, <&clk_ext3>, <&clk_ext4>;
+        clock-names = "osc_32k", "osc_24m", "clk_ext1",
+                      "clk_ext2", "clk_ext3", "clk_ext4";
+    };
+
+...
index 5cf0b81..f0b804a 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/milbeaut-clock.yaml#
+$id: http://devicetree.org/schemas/clock/milbeaut-clock.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Milbeaut SoCs Clock Controller Binding
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
deleted file mode 100644 (file)
index d639e18..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-Qualcomm Technologies, Inc. Display Clock Controller Binding
-------------------------------------------------------------
-
-Required properties :
-
-- compatible : shall contain "qcom,sdm845-dispcc"
-- reg : shall contain base register location and length.
-- #clock-cells : from common clock binding, shall contain 1.
-- #reset-cells : from common reset binding, shall contain 1.
-- #power-domain-cells : from generic power domain binding, shall contain 1.
-
-Example:
-       dispcc: clock-controller@af00000 {
-               compatible = "qcom,sdm845-dispcc";
-               reg = <0xaf00000 0x100000>;
-               #clock-cells = <1>;
-               #reset-cells = <1>;
-               #power-domain-cells = <1>;
-       };
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,dispcc.yaml
new file mode 100644 (file)
index 0000000..795fe68
--- /dev/null
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,dispcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller Binding
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm display clock control module which supports the clocks, resets and
+  power domains.
+
+properties:
+  compatible:
+    enum:
+      - qcom,sc7180-dispcc
+      - qcom,sdm845-dispcc
+
+  clocks:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: Board XO source
+      - description: GPLL0 source from GCC
+
+  clock-names:
+    items:
+      - const: xo
+      - const: gpll0
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  # Example of DISPCC with clock node properties for SDM845:
+  - |
+    clock-controller@af00000 {
+      compatible = "qcom,sdm845-dispcc";
+      reg = <0xaf00000 0x10000>;
+      clocks = <&rpmhcc 0>, <&gcc 24>;
+      clock-names = "xo", "gpll0";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
index e73a56f..e814eec 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/qcom,gcc.yaml#
+$id: http://devicetree.org/schemas/clock/qcom,gcc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Qualcomm Global Clock & Reset Controller Binding
@@ -19,8 +19,9 @@ properties:
     enum:
        - qcom,gcc-apq8064
        - qcom,gcc-apq8084
-       - qcom,gcc-ipq8064
        - qcom,gcc-ipq4019
+       - qcom,gcc-ipq6018
+       - qcom,gcc-ipq8064
        - qcom,gcc-ipq8074
        - qcom,gcc-msm8660
        - qcom,gcc-msm8916
@@ -40,20 +41,50 @@ properties:
        - qcom,gcc-sm8150
 
   clocks:
-    minItems: 1
-    maxItems: 3
-    items:
-      - description: Board XO source
-      - description: Board active XO source
-      - description: Sleep clock source
+    oneOf:
+      #qcom,gcc-sm8150
+      #qcom,gcc-sc7180
+      - items:
+        - description: Board XO source
+        - description: Board active XO source
+        - description: Sleep clock source
+      #qcom,gcc-msm8996
+      - items:
+        - description: XO source
+        - description: Second XO source
+        - description: Sleep clock source
+      #qcom,gcc-msm8998
+      - items:
+        - description: Board XO source
+        - description: Sleep clock source
+        - description: USB 3.0 phy pipe clock
+        - description: UFS phy rx symbol clock for pipe 0
+        - description: UFS phy rx symbol clock for pipe 1
+        - description: UFS phy tx symbol clock
+        - description: PCIE phy pipe clock
 
   clock-names:
-    minItems: 1
-    maxItems: 3
-    items:
-      - const: bi_tcxo
-      - const: bi_tcxo_ao
-      - const: sleep_clk
+    oneOf:
+      #qcom,gcc-sm8150
+      #qcom,gcc-sc7180
+      - items:
+        - const: bi_tcxo
+        - const: bi_tcxo_ao
+        - const: sleep_clk
+      #qcom,gcc-msm8996
+      - items:
+        - const: cxo
+        - const: cxo2
+        - const: sleep_clk
+      #qcom,gcc-msm8998
+      - items:
+        - const: xo
+        - const: sleep_clk
+        - const: usb3_pipe
+        - const: ufs_rx_symbol0
+        - const: ufs_rx_symbol1
+        - const: ufs_tx_symbol0
+        - const: pcie0_pipe
 
   '#clock-cells':
     const: 1
@@ -118,6 +149,7 @@ else:
       compatible:
         contains:
           enum:
+            - qcom,gcc-msm8998
             - qcom,gcc-sm8150
             - qcom,gcc-sc7180
   then:
@@ -179,10 +211,35 @@ examples:
     clock-controller@100000 {
       compatible = "qcom,gcc-sc7180";
       reg = <0x100000 0x1f0000>;
-      clocks = <&rpmhcc 0>, <&rpmhcc 1>;
-      clock-names = "bi_tcxo", "bi_tcxo_ao";
+      clocks = <&rpmhcc 0>, <&rpmhcc 1>, <0>;
+      clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+
+  # Example of MSM8998 GCC:
+  - |
+    #include <dt-bindings/clock/qcom,rpmcc.h>
+    clock-controller@100000 {
+      compatible = "qcom,gcc-msm8998";
       #clock-cells = <1>;
       #reset-cells = <1>;
       #power-domain-cells = <1>;
+      reg = <0x00100000 0xb0000>;
+      clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+               <&sleep>,
+               <0>,
+               <0>,
+               <0>,
+               <0>,
+               <0>;
+      clock-names = "xo",
+                    "sleep_clk",
+                    "usb3_pipe",
+                    "ufs_rx_symbol0",
+                    "ufs_rx_symbol1",
+                    "ufs_tx_symbol0",
+                    "pcie0_pipe";
     };
 ...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
deleted file mode 100644 (file)
index 269afe8..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Qualcomm Graphics Clock & Reset Controller Binding
---------------------------------------------------
-
-Required properties :
-- compatible : shall contain "qcom,sdm845-gpucc" or "qcom,msm8998-gpucc"
-- reg : shall contain base register location and length
-- #clock-cells : from common clock binding, shall contain 1
-- #reset-cells : from common reset binding, shall contain 1
-- #power-domain-cells : from generic power domain binding, shall contain 1
-- clocks : shall contain the XO clock
-          shall contain the gpll0 out main clock (msm8998)
-- clock-names : shall be "xo"
-               shall be "gpll0" (msm8998)
-
-Example:
-       gpucc: clock-controller@5090000 {
-               compatible = "qcom,sdm845-gpucc";
-               reg = <0x5090000 0x9000>;
-               #clock-cells = <1>;
-               #reset-cells = <1>;
-               #power-domain-cells = <1>;
-               clocks = <&rpmhcc RPMH_CXO_CLK>;
-               clock-names = "xo";
-       };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml
new file mode 100644 (file)
index 0000000..679e7fe
--- /dev/null
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gpucc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Graphics Clock & Reset Controller Binding
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm graphics clock control module which supports the clocks, resets and
+  power domains.
+
+properties:
+  compatible:
+    enum:
+      - qcom,msm8998-gpucc
+      - qcom,sc7180-gpucc
+      - qcom,sdm845-gpucc
+
+  clocks:
+    minItems: 1
+    maxItems: 3
+    items:
+      - description: Board XO source
+      - description: GPLL0 main branch source from GCC(gcc_gpu_gpll0_clk_src)
+      - description: GPLL0 div branch source from GCC(gcc_gpu_gpll0_div_clk_src)
+
+  clock-names:
+    minItems: 1
+    maxItems: 3
+    items:
+      - const: xo
+      - const: gpll0_main
+      - const: gpll0_div
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  # Example of GPUCC with clock node properties for SDM845:
+  - |
+    clock-controller@5090000 {
+      compatible = "qcom,sdm845-gpucc";
+      reg = <0x5090000 0x9000>;
+      clocks = <&rpmhcc 0>, <&gcc 31>, <&gcc 32>;
+      clock-names = "xo", "gpll0_main", "gpll0_div";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.txt b/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
deleted file mode 100644 (file)
index 8b0f784..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-Qualcomm Multimedia Clock & Reset Controller Binding
-----------------------------------------------------
-
-Required properties :
-- compatible : shall contain only one of the following:
-
-                       "qcom,mmcc-apq8064"
-                       "qcom,mmcc-apq8084"
-                       "qcom,mmcc-msm8660"
-                       "qcom,mmcc-msm8960"
-                       "qcom,mmcc-msm8974"
-                       "qcom,mmcc-msm8996"
-
-- reg : shall contain base register location and length
-- #clock-cells : shall contain 1
-- #reset-cells : shall contain 1
-
-Optional properties :
-- #power-domain-cells : shall contain 1
-
-Example:
-       clock-controller@4000000 {
-               compatible = "qcom,mmcc-msm8960";
-               reg = <0x4000000 0x1000>;
-               #clock-cells = <1>;
-               #reset-cells = <1>;
-               #power-domain-cells = <1>;
-       };
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
new file mode 100644 (file)
index 0000000..8551849
--- /dev/null
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,mmcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Multimedia Clock & Reset Controller Binding
+
+maintainers:
+  - Jeffrey Hugo <jhugo@codeaurora.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm multimedia clock control module which supports the clocks, resets and
+  power domains.
+
+properties:
+  compatible:
+    enum:
+       - qcom,mmcc-apq8064
+       - qcom,mmcc-apq8084
+       - qcom,mmcc-msm8660
+       - qcom,mmcc-msm8960
+       - qcom,mmcc-msm8974
+       - qcom,mmcc-msm8996
+       - qcom,mmcc-msm8998
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: Board sleep source
+      - description: Global PLL 0 clock
+      - description: DSI phy instance 0 dsi clock
+      - description: DSI phy instance 0 byte clock
+      - description: DSI phy instance 1 dsi clock
+      - description: DSI phy instance 1 byte clock
+      - description: HDMI phy PLL clock
+      - description: DisplayPort phy PLL vco clock
+      - description: DisplayPort phy PLL link clock
+
+  clock-names:
+    items:
+      - const: xo
+      - const: sleep
+      - const: gpll0
+      - const: dsi0dsi
+      - const: dsi0byte
+      - const: dsi1dsi
+      - const: dsi1byte
+      - const: hdmipll
+      - const: dpvco
+      - const: dplink
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+       Protected clock specifier list as per common clock binding
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: qcom,mmcc-msm8998
+
+then:
+  required:
+    - clocks
+    - clock-names
+
+examples:
+  # Example for MMCC for MSM8960:
+  - |
+    clock-controller@4000000 {
+      compatible = "qcom,mmcc-msm8960";
+      reg = <0x4000000 0x1000>;
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
index 94e2f14..2cd158f 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/qcom,rpmhcc.yaml#
+$id: http://devicetree.org/schemas/clock/qcom,rpmhcc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Qualcomm Technologies, Inc. RPMh Clocks Bindings
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
deleted file mode 100644 (file)
index 8a8622c..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-Qualcomm Video Clock & Reset Controller Binding
------------------------------------------------
-
-Required properties :
-- compatible : shall contain "qcom,sdm845-videocc"
-- reg : shall contain base register location and length
-- #clock-cells : from common clock binding, shall contain 1.
-- #power-domain-cells : from generic power domain binding, shall contain 1.
-- #reset-cells : from common reset binding, shall contain 1.
-
-Example:
-       videocc: clock-controller@ab00000 {
-               compatible = "qcom,sdm845-videocc";
-               reg = <0xab00000 0x10000>;
-               #clock-cells = <1>;
-               #power-domain-cells = <1>;
-               #reset-cells = <1>;
-       };
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,videocc.yaml
new file mode 100644 (file)
index 0000000..2946b24
--- /dev/null
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,videocc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Video Clock & Reset Controller Binding
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm video clock control module which supports the clocks, resets and
+  power domains.
+
+properties:
+  compatible:
+    enum:
+      - qcom,sc7180-videocc
+      - qcom,sdm845-videocc
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: xo
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  # Example of VIDEOCC with clock node properties for SDM845:
+  - |
+    clock-controller@ab00000 {
+      compatible = "qcom,sdm845-videocc";
+      reg = <0xab00000 0x10000>;
+      clocks = <&rpmhcc 0>;
+      clock-names = "xo";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
index c7674d0..f4d153f 100644 (file)
@@ -19,7 +19,7 @@ Required Properties:
       - "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
       - "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C)
       - "renesas,r8a774a1-cpg-mssr" for the r8a774a1 SoC (RZ/G2M)
-      - "renesas,r8a774b1-cpg-mssr" for the r8a774a1 SoC (RZ/G2N)
+      - "renesas,r8a774b1-cpg-mssr" for the r8a774b1 SoC (RZ/G2N)
       - "renesas,r8a774c0-cpg-mssr" for the r8a774c0 SoC (RZ/G2E)
       - "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
       - "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
index b8f91e4..4e38550 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/st,stm32mp1-rcc.yaml#
+$id: http://devicetree.org/schemas/clock/st,stm32mp1-rcc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Reset Clock Controller Binding
index 48ee699..18af6b9 100644 (file)
@@ -16,18 +16,23 @@ For more information, please see the Linux clock framework binding at
 Documentation/devicetree/bindings/clock/clock-bindings.txt.
 
 Required properties :
-- compatible : shall be "ti,clkctrl"
+- compatible : shall be "ti,clkctrl" or a clock domain specific name:
+              "ti,clkctrl-l4-cfg"
+              "ti,clkctrl-l4-per"
+              "ti,clkctrl-l4-secure"
+              "ti,clkctrl-l4-wkup"
 - #clock-cells : shall contain 2 with the first entry being the instance
                 offset from the clock domain base and the second being the
                 clock index
+- reg : clock registers
 
 Example: Clock controller node on omap 4430:
 
 &cm2 {
        l4per: cm@1400 {
                cm_l4per@0 {
-                       cm_l4per_clkctrl: clk@20 {
-                               compatible = "ti,clkctrl";
+                       cm_l4per_clkctrl: clock@20 {
+                               compatible = "ti,clkctrl-l4-per", "ti,clkctrl";
                                reg = <0x20 0x1b0>;
                                #clock-cells = <2>;
                        };
index 10f7047..21c002d 100644 (file)
@@ -43,7 +43,7 @@ Configuration of ATL instances:
        - aws : Audio word select signal selection
 };
 
-For valid word select signals, see the dt-bindings/clk/ti-dra7-atl.h include
+For valid word select signals, see the dt-bindings/clock/ti-dra7-atl.h include
 file.
 
 Examples:
@@ -83,7 +83,7 @@ atl: atl@4843c000 {
        clock-names = "fck";
 };
 
-#include <dt-bindings/clk/ti-dra7-atl.h>
+#include <dt-bindings/clock/ti-dra7-atl.h>
 
 &atl {
 
diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
new file mode 100644 (file)
index 0000000..229af98
--- /dev/null
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/xlnx,versal-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Versal clock controller
+
+maintainers:
+  - Michal Simek <michal.simek@xilinx.com>
+  - Jolly Shah <jolly.shah@xilinx.com>
+  - Rajan Vaja <rajan.vaja@xilinx.com>
+
+description: |
+  The clock controller is a hardware block of the Xilinx Versal clock tree. It
+  reads the required input clock frequencies from the devicetree and acts as the
+  clock provider for all clock consumers of PS clocks.
+
+select: false
+
+properties:
+  compatible:
+    const: xlnx,versal-clk
+
+  "#clock-cells":
+    const: 1
+
+  clocks:
+    description: List of clock specifiers which are external input
+      clocks to the given clock controller.
+    items:
+      - description: reference clock
+      - description: alternate reference clock
+      - description: alternate reference clock for programmable logic
+
+  clock-names:
+    items:
+      - const: ref
+      - const: alt_ref
+      - const: pl_alt_ref
+
+required:
+  - compatible
+  - "#clock-cells"
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    firmware {
+      zynqmp_firmware: zynqmp-firmware {
+        compatible = "xlnx,zynqmp-firmware";
+        method = "smc";
+        versal_clk: clock-controller {
+          #clock-cells = <1>;
+          compatible = "xlnx,versal-clk";
+          clocks = <&ref>, <&alt_ref>, <&pl_alt_ref>;
+          clock-names = "ref", "alt_ref", "pl_alt_ref";
+        };
+      };
+    };
+...
index ae04903..6a742a5 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/hwmon/adi,ltc2947.yaml#
+$id: http://devicetree.org/schemas/hwmon/adi,ltc2947.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices LTC2947 high precision power and energy monitor
index e932d5a..f0934b2 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright 2019 Analog Devices Inc.
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/iio/adc/adi,ad7124.yaml#
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7124.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices AD7124 ADC device driver
index 567a33a..84d25bd 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright 2019 Analog Devices Inc.
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/iio/adc/adi,ad7192.yaml#
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7192.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices AD7192 ADC device driver
index 881059b..0ce2904 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright 2019 Marcus Folkesson <marcus.folkesson@gmail.com>
 %YAML 1.2
 ---
-$id: "http://devicetree.org/schemas/bindings/iio/adc/microchip,mcp3911.yaml#"
+$id: "http://devicetree.org/schemas/iio/adc/microchip,mcp3911.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
 title: Microchip MCP3911 Dual channel analog front end (ADC)
index c914070..acf36ee 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/st,stm32-dfsdm-adc.yaml#
+$id: http://devicetree.org/schemas/iio/adc/st,stm32-dfsdm-adc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: STMicroelectronics STM32 DFSDM ADC device driver
index 13d005b..a285eab 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright 2019 Marcus Folkesson <marcus.folkesson@gmail.com>
 %YAML 1.2
 ---
-$id: "http://devicetree.org/schemas/bindings/iio/dac/lltc,ltc1660.yaml#"
+$id: "http://devicetree.org/schemas/iio/dac/lltc,ltc1660.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
 title: Linear Technology Micropower octal 8-Bit and 10-Bit DACs
index 903475f..b98bf93 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/input/gpio-vibrator.yaml#
+$id: http://devicetree.org/schemas/input/gpio-vibrator.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: GPIO vibrator
index 0f69500..0e57315 100644 (file)
@@ -36,6 +36,8 @@ Optional properties:
  - pinctrl-0:   a phandle pointing to the pin settings for the
                 control gpios
 
+ - wakeup-source: If present, the device will act as a wakeup source
+
  - threshold:   allows setting the "click"-threshold in the range
                 from 0 to 80.
 
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
deleted file mode 100644 (file)
index fc03ea4..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-Device tree bindings for Goodix GT9xx series touchscreen controller
-
-Required properties:
-
- - compatible          : Should be "goodix,gt1151"
-                                or "goodix,gt5663"
-                                or "goodix,gt5688"
-                                or "goodix,gt911"
-                                or "goodix,gt9110"
-                                or "goodix,gt912"
-                                or "goodix,gt927"
-                                or "goodix,gt9271"
-                                or "goodix,gt928"
-                                or "goodix,gt967"
- - reg                 : I2C address of the chip. Should be 0x5d or 0x14
- - interrupts          : Interrupt to which the chip is connected
-
-Optional properties:
-
- - irq-gpios           : GPIO pin used for IRQ. The driver uses the
-                         interrupt gpio pin as output to reset the device.
- - reset-gpios         : GPIO pin used for reset
- - AVDD28-supply       : Analog power supply regulator on AVDD28 pin
- - VDDIO-supply                : GPIO power supply regulator on VDDIO pin
- - touchscreen-inverted-x
- - touchscreen-inverted-y
- - touchscreen-size-x
- - touchscreen-size-y
- - touchscreen-swapped-x-y
-
-The touchscreen-* properties are documented in touchscreen.txt in this
-directory.
-
-Example:
-
-       i2c@00000000 {
-               /* ... */
-
-               gt928@5d {
-                       compatible = "goodix,gt928";
-                       reg = <0x5d>;
-                       interrupt-parent = <&gpio>;
-                       interrupts = <0 0>;
-
-                       irq-gpios = <&gpio1 0 0>;
-                       reset-gpios = <&gpio1 1 0>;
-               };
-
-               /* ... */
-       };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
new file mode 100644 (file)
index 0000000..d7c3262
--- /dev/null
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/goodix.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Goodix GT9xx series touchscreen controller Bindings
+
+maintainers:
+  - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  compatible:
+    enum:
+      - goodix,gt1151
+      - goodix,gt5663
+      - goodix,gt5688
+      - goodix,gt911
+      - goodix,gt9110
+      - goodix,gt912
+      - goodix,gt927
+      - goodix,gt9271
+      - goodix,gt928
+      - goodix,gt967
+
+  reg:
+    enum: [ 0x5d, 0x14 ]
+
+  interrupts:
+    maxItems: 1
+
+  irq-gpios:
+    description: GPIO pin used for IRQ.
+                 The driver uses the interrupt gpio pin as
+                 output to reset the device.
+    maxItems: 1
+
+  reset-gpios:
+    maxItems: 1
+
+  AVDD28-supply:
+    description: Analog power supply regulator on AVDD28 pin
+
+  VDDIO-supply:
+    description: GPIO power supply regulator on VDDIO pin
+
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+- |
+    i2c@00000000 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      gt928@5d {
+        compatible = "goodix,gt928";
+        reg = <0x5d>;
+        interrupt-parent = <&gpio>;
+        interrupts = <0 0>;
+        irq-gpios = <&gpio1 0 0>;
+        reset-gpios = <&gpio1 1 0>;
+      };
+    };
+
+...
index 8641a2d..e1adb90 100644 (file)
@@ -1,39 +1 @@
-General Touchscreen Properties:
-
-Optional properties for Touchscreens:
- - touchscreen-min-x           : minimum x coordinate reported (0 if not set)
- - touchscreen-min-y           : minimum y coordinate reported (0 if not set)
- - touchscreen-size-x          : horizontal resolution of touchscreen
-                                 (maximum x coordinate reported + 1)
- - touchscreen-size-y          : vertical resolution of touchscreen
-                                 (maximum y coordinate reported + 1)
- - touchscreen-max-pressure    : maximum reported pressure (arbitrary range
-                                 dependent on the controller)
- - touchscreen-min-pressure    : minimum pressure on the touchscreen to be
-                                 achieved in order for the touchscreen
-                                 driver to report a touch event.
- - touchscreen-fuzz-x          : horizontal noise value of the absolute input
-                                 device (in pixels)
- - touchscreen-fuzz-y          : vertical noise value of the absolute input
-                                 device (in pixels)
- - touchscreen-fuzz-pressure   : pressure noise value of the absolute input
-                                 device (arbitrary range dependent on the
-                                 controller)
- - touchscreen-average-samples : Number of data samples which are averaged
-                                 for each read (valid values dependent on the
-                                 controller)
- - touchscreen-inverted-x      : X axis is inverted (boolean)
- - touchscreen-inverted-y      : Y axis is inverted (boolean)
- - touchscreen-swapped-x-y     : X and Y axis are swapped (boolean)
-                                 Swapping is done after inverting the axis
- - touchscreen-x-mm            : horizontal length in mm of the touchscreen
- - touchscreen-y-mm            : vertical length in mm of the touchscreen
-
-Deprecated properties for Touchscreens:
- - x-size                      : deprecated name for touchscreen-size-x
- - y-size                      : deprecated name for touchscreen-size-y
- - moving-threshold            : deprecated name for a combination of
-                                 touchscreen-fuzz-x and touchscreen-fuzz-y
- - contact-threshold           : deprecated name for touchscreen-fuzz-pressure
- - x-invert                    : deprecated name for touchscreen-inverted-x
- - y-invert                    : deprecated name for touchscreen-inverted-y
+See touchscreen.yaml
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
new file mode 100644 (file)
index 0000000..d7dac16
--- /dev/null
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/touchscreen.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common touchscreen Bindings
+
+maintainers:
+  - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+properties:
+  touchscreen-min-x:
+    description: minimum x coordinate reported
+    $ref: /schemas/types.yaml#/definitions/uint32
+    default: 0
+
+  touchscreen-min-y:
+    description: minimum y coordinate reported
+    $ref: /schemas/types.yaml#/definitions/uint32
+    default: 0
+
+  touchscreen-size-x:
+    description: horizontal resolution of touchscreen (maximum x coordinate reported + 1)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-size-y:
+    description: vertical resolution of touchscreen (maximum y coordinate reported + 1)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-max-pressure:
+    description: maximum reported pressure (arbitrary range dependent on the controller)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-min-pressure:
+    description: minimum pressure on the touchscreen to be achieved in order for the
+                 touchscreen driver to report a touch event.
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-fuzz-x:
+    description: horizontal noise value of the absolute input device (in pixels)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-fuzz-y:
+    description: vertical noise value of the absolute input device (in pixels)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-fuzz-pressure:
+    description: pressure noise value of the absolute input device (arbitrary range
+                 dependent on the controller)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-average-samples:
+    description: Number of data samples which are averaged for each read (valid values
+                 dependent on the controller)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-inverted-x:
+    description: X axis is inverted
+    type: boolean
+
+  touchscreen-inverted-y:
+    description: Y axis is inverted
+    type: boolean
+
+  touchscreen-swapped-x-y:
+    description: X and Y axis are swapped.
+                 Swapping is done after inverting the axis.
+    type: boolean
+
+  touchscreen-x-mm:
+    description: horizontal length in mm of the touchscreen
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-y-mm:
+    description: vertical length in mm of the touchscreen
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+dependencies:
+  touchscreen-size-x: [ touchscreen-size-y ]
+  touchscreen-size-y: [ touchscreen-size-x ]
+  touchscreen-x-mm: [ touchscreen-y-mm ]
+  touchscreen-y-mm: [ touchscreen-x-mm ]
index 5a8b462..3c36334 100644 (file)
@@ -86,6 +86,12 @@ have a means to turn off translation. But it is invalid in such cases to
 disable the IOMMU's device tree node in the first place because it would
 prevent any driver from properly setting up the translations.
 
+Optional properties:
+--------------------
+- pasid-num-bits: Some masters support multiple address spaces for DMA, by
+  tagging DMA transactions with an address space identifier. By default,
+  this is 0, which means that the device only has one address space.
+
 
 Notes:
 ======
index 0278482..beec612 100644 (file)
@@ -21,10 +21,11 @@ platforms.
        Usage: required
        Value type: <prop-encoded-array>
        Definition: must specify the base address and size of the global block
+
 - clocks:
-       Usage: required if #clocks-cells property is present
-       Value type: <phandle>
-       Definition: phandle to the input PLL, which feeds the APCS mux/divider
+       Usage: required if the clock-names property is present
+       Value type: <phandle array>
+       Definition: phandles to the two parent clocks of the clock driver.
 
 - #mbox-cells:
        Usage: required
@@ -36,6 +37,12 @@ platforms.
        Value type: <u32>
        Definition: as described in clock.txt, must be 0
 
+- clock-names:
+       Usage: required if the platform data based clock driver needs to
+       retrieve the parent clock names from device tree.
+       This requires two mandatory clocks to be defined.
+       Value type: <string-array>
+       Definition: must be "pll" and "aux"
 
 = EXAMPLE
 The following example describes the APCS HMSS found in MSM8996 and part of the
@@ -68,3 +75,14 @@ Below is another example of the APCS binding on MSM8916 platforms:
                clocks = <&a53pll>;
                #clock-cells = <0>;
        };
+
+Below is another example of the APCS binding on QCS404 platforms:
+
+       apcs_glb: mailbox@b011000 {
+               compatible = "qcom,qcs404-apcs-apps-global", "syscon";
+               reg = <0x0b011000 0x1000>;
+               #mbox-cells = <1>;
+               clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
+               clock-names = "pll", "aux";
+               #clock-cells = <0>;
+       };
index db605d8..48a98da 100644 (file)
@@ -107,7 +107,6 @@ additionalProperties: false
 examples:
   - |
     #include <dt-bindings/gpio/gpio.h>
-    #include <dt-bindings/clock/intel,lgm-clk.h>
     pcie10: pcie@d0e00000 {
       compatible = "intel,lgm-pcie", "snps,dw-pcie";
       device_type = "pci";
@@ -120,7 +119,6 @@ examples:
       linux,pci-domain = <0>;
       max-link-speed = <4>;
       bus-range = <0x00 0x08>;
-      interrupt-parent = <&ioapic1>;
       #interrupt-cells = <1>;
       interrupt-map-mask = <0 0 0 0x7>;
       interrupt-map = <0 0 0 1 &ioapic1 27 1>,
@@ -129,7 +127,7 @@ examples:
                       <0 0 0 4 &ioapic1 30 1>;
       ranges = <0x02000000 0 0xd4000000 0xd4000000 0 0x04000000>;
       resets = <&rcu0 0x50 0>;
-      clocks = <&cgu0 LGM_GCLK_PCIE10>;
+      clocks = <&cgu0 120>;
       phys = <&cb0phy0>;
       phy-names = "pcie";
       reset-assert-ms = <500>;
index ff7959c..0ccee64 100644 (file)
@@ -45,8 +45,10 @@ examples:
     sysconf: chiptop@e0200000 {
       compatible = "intel,lgm-syscon", "syscon";
       reg = <0xe0200000 0x100>;
+      #address-cells = <1>;
+      #size-cells = <1>;
 
-      emmc-phy: emmc-phy@a8 {
+      emmc_phy: emmc-phy@a8 {
         compatible = "intel,lgm-emmc-phy";
         reg = <0x00a8 0x10>;
         clocks = <&emmc>;
index 1b06f86..a1b8a48 100644 (file)
@@ -3,7 +3,7 @@ Freescale MXS PWM controller
 Required properties:
 - compatible: should be "fsl,imx23-pwm"
 - reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
+- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
   the cells format.
 - fsl,pwm-number: the number of PWM devices
 
@@ -12,6 +12,6 @@ Example:
 pwm: pwm@80064000 {
        compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
        reg = <0x80064000 0x2000>;
-       #pwm-cells = <2>;
+       #pwm-cells = <3>;
        fsl,pwm-number = <8>;
 };
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt b/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt
new file mode 100644 (file)
index 0000000..3ba668b
--- /dev/null
@@ -0,0 +1,36 @@
+Mediatek SCP Bindings
+----------------------------------------
+
+This binding provides support for the ARM Cortex-M4 co-processor found on some
+Mediatek SoCs.
+
+Required properties:
+- compatible           Should be "mediatek,mt8183-scp"
+- reg                  Should contain the address ranges for the two memory
+                       regions, SRAM and CFG.
+- reg-names            Contains the corresponding names for the two memory
+                       regions. These should be named "sram" & "cfg".
+- clocks               Clock for co-processor (See: ../clock/clock-bindings.txt)
+- clock-names          Contains the corresponding name for the clock. This
+                       should be named "main".
+
+Subnodes
+--------
+
+Subnodes of the SCP represent rpmsg devices. The names of the devices are not
+important. The properties of these nodes are defined by the individual bindings
+for the rpmsg devices - but must contain the following property:
+
+- mtk,rpmsg-name       Contains the name for the rpmsg device. Used to match
+                       the subnode to the rpmsg device announced by the SCP.
+
+Example:
+
+       scp: scp@10500000 {
+               compatible = "mediatek,mt8183-scp";
+               reg = <0 0x10500000 0 0x80000>,
+                     <0 0x105c0000 0 0x5000>;
+               reg-names = "sram", "cfg";
+               clocks = <&infracfg CLK_INFRA_SCPSYS>;
+               clock-names = "main";
+       };
index 292dfda..9938918 100644 (file)
@@ -10,11 +10,17 @@ on the Qualcomm ADSP Hexagon core.
                    "qcom,msm8974-adsp-pil"
                    "qcom,msm8996-adsp-pil"
                    "qcom,msm8996-slpi-pil"
+                   "qcom,msm8998-adsp-pas"
+                   "qcom,msm8998-slpi-pas"
                    "qcom,qcs404-adsp-pas"
                    "qcom,qcs404-cdsp-pas"
                    "qcom,qcs404-wcss-pas"
                    "qcom,sdm845-adsp-pas"
                    "qcom,sdm845-cdsp-pas"
+                   "qcom,sm8150-adsp-pas"
+                   "qcom,sm8150-cdsp-pas"
+                   "qcom,sm8150-mpss-pas"
+                   "qcom,sm8150-slpi-pas"
 
 - interrupts-extended:
        Usage: required
@@ -29,12 +35,18 @@ on the Qualcomm ADSP Hexagon core.
        qcom,msm8974-adsp-pil:
        qcom,msm8996-adsp-pil:
        qcom,msm8996-slpi-pil:
+       qcom,msm8998-adsp-pas:
+       qcom,msm8998-slpi-pas:
        qcom,qcs404-adsp-pas:
        qcom,qcs404-cdsp-pas:
        qcom,sdm845-adsp-pas:
        qcom,sdm845-cdsp-pas:
+       qcom,sm8150-adsp-pas:
+       qcom,sm8150-cdsp-pas:
+       qcom,sm8150-slpi-pas:
                    must be "wdog", "fatal", "ready", "handover", "stop-ack"
        qcom,qcs404-wcss-pas:
+       qcom,sm8150-mpss-pas:
                    must be "wdog", "fatal", "ready", "handover", "stop-ack",
                    "shutdown-ack"
 
@@ -67,6 +79,38 @@ on the Qualcomm ADSP Hexagon core.
        Definition: reference to the px regulator to be held on behalf of the
                    booting Hexagon core
 
+- power-domains:
+       Usage: required
+       Value type: <phandle>
+       Definition: reference to power-domains that match the power-domain-names
+
+- power-domain-names:
+       Usage: required
+       Value type: <stringlist>
+       Definition: The power-domains needed depend on the compatible string:
+       qcom,msm8974-adsp-pil:
+       qcom,msm8996-adsp-pil:
+       qcom,msm8998-adsp-pas:
+                   must be "cx"
+       qcom,msm8996-slpi-pil:
+                   must be "ss_cx"
+       qcom,msm8998-slpi-pas:
+                   must be "ssc_cx"
+       qcom,qcs404-adsp-pas:
+                   must be "lpi_cx"
+       qcom,qcs404-cdsp-pas:
+       qcom,qcs404-wcss-pas:
+                   must be "mx"
+       qcom,sdm845-adsp-pas:
+       qcom,sdm845-cdsp-pas:
+       qcom,sm8150-adsp-pas:
+       qcom,sm8150-cdsp-pas:
+                   must be "cx", "load_state"
+       qcom,sm8150-mpss-pas:
+                   must be "cx", "load_state", "mss"
+       qcom,sm8150-slpi-pas:
+                   must be "lcx", "lmx", "load_state"
+
 - memory-region:
        Usage: required
        Value type: <phandle>
index c416746..88dfa3f 100644 (file)
@@ -13,6 +13,7 @@ on the Qualcomm Hexagon core.
                    "qcom,msm8974-mss-pil"
                    "qcom,msm8996-mss-pil"
                    "qcom,msm8998-mss-pil"
+                   "qcom,sc7180-mss-pil"
                    "qcom,sdm845-mss-pil"
 
 - reg:
@@ -43,6 +44,7 @@ on the Qualcomm Hexagon core.
                    must be "wdog", "fatal", "ready", "handover", "stop-ack"
        qcom,msm8996-mss-pil:
        qcom,msm8998-mss-pil:
+       qcom,sc7180-mss-pil:
        qcom,sdm845-mss-pil:
                    must be "wdog", "fatal", "ready", "handover", "stop-ack",
                    "shutdown-ack"
@@ -75,6 +77,9 @@ on the Qualcomm Hexagon core.
        qcom,msm8998-mss-pil:
                    must be "iface", "bus", "mem", "xo", "gpll0_mss",
                    "snoc_axi", "mnoc_axi", "qdss"
+       qcom,sc7180-mss-pil:
+                   must be "iface", "bus", "xo", "snoc_axi", "mnoc_axi",
+                   "mss_crypto", "mss_nav", "nav"
        qcom,sdm845-mss-pil:
                    must be "iface", "bus", "mem", "xo", "gpll0_mss",
                    "snoc_axi", "mnoc_axi", "prng"
@@ -86,7 +91,7 @@ on the Qualcomm Hexagon core.
                    reference to the list of 3 reset-controllers for the
                    wcss sub-system
                    reference to the list of 2 reset-controllers for the modem
-                   sub-system on SDM845 SoCs
+                   sub-system on SC7180, SDM845 SoCs
 
 - reset-names:
        Usage: required
@@ -95,7 +100,7 @@ on the Qualcomm Hexagon core.
                    must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
                    for the wcss sub-system
                    must be "mss_restart", "pdc_reset" for the modem
-                   sub-system on SDM845 SoCs
+                   sub-system on SC7180, SDM845 SoCs
 
 For the compatible strings below the following supplies are required:
   "qcom,q6v5-pil"
@@ -144,6 +149,7 @@ For the compatible string below the following supplies are required:
        qcom,msm8996-mss-pil:
        qcom,msm8998-mss-pil:
                    must be "cx", "mx"
+       qcom,sc7180-mss-pil:
        qcom,sdm845-mss-pil:
                    must be "cx", "mx", "mss", "load_state"
 
@@ -165,6 +171,19 @@ For the compatible string below the following supplies are required:
                    by the three offsets within syscon for q6, modem and nc
                    halt registers.
 
+For the compatible strings below the following phandle references are required:
+  "qcom,sc7180-mss-pil"
+- qcom,halt-nav-regs:
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: reference to a list of 2 phandles with one offset each for
+                   the modem sub-system running on SC7180 SoC. The first
+                   phandle reference is to the mss clock node followed by the
+                   offset within register space for nav halt register. The
+                   second phandle reference is to a syscon representing TCSR
+                   followed by the offset within syscon for conn_box_spare0
+                   register.
+
 = SUBNODES:
 The Hexagon node must contain two subnodes, named "mba" and "mpss" representing
 the memory regions used by the Hexagon firmware. Each sub-node must contain:
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
deleted file mode 100644 (file)
index 5d3791e..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-Atmel AT91RM9200 Real Time Clock
-
-Required properties:
-- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
-- reg: physical base address of the controller and length of memory mapped
-  region.
-- interrupts: rtc alarm/event interrupt
-- clocks: phandle to input clock.
-
-Example:
-
-rtc@fffffe00 {
-       compatible = "atmel,at91rm9200-rtc";
-       reg = <0xfffffe00 0x100>;
-       interrupts = <1 4 7>;
-       clocks = <&clk32k>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
new file mode 100644 (file)
index 0000000..02bbfe7
--- /dev/null
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/atmel,at91rm9200-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel AT91 RTC Device Tree Bindings
+
+allOf:
+  - $ref: "rtc.yaml#"
+
+maintainers:
+  - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+  compatible:
+    enum:
+      - atmel,at91rm9200-rtc
+      - atmel,at91sam9x5-rtc
+      - atmel,sama5d4-rtc
+      - atmel,sama5d2-rtc
+      - microchip,sam9x60-rtc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+
+additionalProperties: false
+
+examples:
+  - |
+    rtc@fffffe00 {
+        compatible = "atmel,at91rm9200-rtc";
+        reg = <0xfffffe00 0x100>;
+        interrupts = <1 4 7>;
+        clocks = <&clk32k>;
+    };
+...
index 411375e..0654380 100644 (file)
@@ -15,13 +15,13 @@ Required properties:
 - clock-frequency : input clock frequency to non FSL_SOC cores
 
 Optional properties:
-- gpios : specifies the gpio pins to be used for chipselects.
+- cs-gpios : specifies the gpio pins to be used for chipselects.
   The gpios will be referred to as reg = <index> in the SPI child nodes.
   If unspecified, a single SPI device without a chip select can be used.
 - fsl,spisel_boot : for the MPC8306 and MPC8309, specifies that the
   SPISEL_BOOT signal is used as chip select for a slave device. Use
   reg = <number of gpios> in the corresponding child node, i.e. 0 if
-  the gpios property is not present.
+  the cs-gpios property is not present.
 
 Example:
        spi@4c0 {
@@ -31,8 +31,8 @@ Example:
                interrupts = <82 0>;
                interrupt-parent = <700>;
                mode = "cpu";
-               gpios = <&gpio 18 1     // device reg=<0>
-                        &gpio 19 1>;   // device reg=<1>
+               cs-gpios = <&gpio 18 1          // device reg=<0>
+                           &gpio 19 1>;        // device reg=<1>
        };
 
 
index a9a7a3c..e5953e7 100644 (file)
@@ -151,8 +151,8 @@ The details of these operations are:
      Note that callbacks will always be invoked from the DMA
      engines tasklet, never from interrupt context.
 
-  Optional: per descriptor metadata
-  ---------------------------------
+Optional: per descriptor metadata
+---------------------------------
   DMAengine provides two ways for metadata support.
 
   DESC_METADATA_CLIENT
index 4fae046..32b2972 100644 (file)
@@ -24,7 +24,7 @@
     |      parisc: |  ok  |
     |     powerpc: |  ok  |
     |       riscv: | TODO |
-    |        s390: | TODO |
+    |        s390: |  ok  |
     |          sh: | TODO |
     |       sparc: | TODO |
     |          um: | TODO |
index ec3b586..7146da0 100644 (file)
@@ -1868,12 +1868,16 @@ There are some more advanced barrier functions:
  (*) smp_mb__before_atomic();
  (*) smp_mb__after_atomic();
 
-     These are for use with atomic (such as add, subtract, increment and
-     decrement) functions that don't return a value, especially when used for
-     reference counting.  These functions do not imply memory barriers.
-
-     These are also used for atomic bitop functions that do not return a
-     value (such as set_bit and clear_bit).
+     These are for use with atomic RMW functions that do not imply memory
+     barriers, but where the code needs a memory barrier. Examples of atomic
+     RMW functions that do not imply a memory barrier are e.g. add,
+     subtract, (failed) conditional operations and _relaxed functions,
+     but not atomic_read or atomic_set. A common example where a memory
+     barrier may be required is when atomic ops are used for reference
+     counting.
+
+     These are also used for atomic RMW bitop functions that do not imply a
+     memory barrier (such as set_bit and clear_bit).
 
      As an example, consider a piece of code that marks an object as being dead
      and then decrements the object's reference count:
index ca2136c..0bf32d1 100644 (file)
@@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
 
         table inet x {
                flowtable f {
-                       hook ingress priority 0 devices = { eth0, eth1 };
+                       hook ingress priority 0; devices = { eth0, eth1 };
                }
                 chain y {
                         type filter hook forward priority 0; policy accept;
diff --git a/Documentation/powerpc/imc.rst b/Documentation/powerpc/imc.rst
new file mode 100644 (file)
index 0000000..633bcee
--- /dev/null
@@ -0,0 +1,199 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _imc:
+
+===================================
+IMC (In-Memory Collection Counters)
+===================================
+
+Anju T Sudhakar, 10 May 2019
+
+.. contents::
+    :depth: 3
+
+
+Basic overview
+==============
+
+IMC (In-Memory collection counters) is a hardware monitoring facility that
+collects large numbers of hardware performance events at Nest level (these are
+on-chip but off-core), Core level and Thread level.
+
+The Nest PMU counters are handled by a Nest IMC microcode which runs in the OCC
+(On-Chip Controller) complex. The microcode collects the counter data and moves
+the nest IMC counter data to memory.
+
+The Core and Thread IMC PMU counters are handled in the core. Core level PMU
+counters give us the IMC counters' data per core and thread level PMU counters
+give us the IMC counters' data per CPU thread.
+
+OPAL obtains the IMC PMU and supported event information from the IMC Catalog
+and passes it on to the kernel via the device tree. The event information
+contains:
+
+- Event name
+- Event Offset
+- Event description
+
+and possibly also:
+
+- Event scale
+- Event unit
+
+Some PMUs may have common scale and unit values for all their supported
+events. In those cases, the scale and unit properties for those events must be
+inherited from the PMU.
+
+The event offset in memory is where the counter data gets accumulated.
+
+IMC catalog is available at:
+       https://github.com/open-power/ima-catalog
+
+The kernel discovers the IMC counter information in the device tree at the
+`imc-counters` device node, which has the compatible field
+`ibm,opal-in-memory-counters`. From the device tree, the kernel parses the PMUs
+and their events' information and registers each PMU and its attributes in the
+kernel.
+
+IMC example usage
+=================
+
+.. code-block:: sh
+
+  # perf list
+  [...]
+  nest_mcs01/PM_MCS01_64B_RD_DISP_PORT01/            [Kernel PMU event]
+  nest_mcs01/PM_MCS01_64B_RD_DISP_PORT23/            [Kernel PMU event]
+  [...]
+  core_imc/CPM_0THRD_NON_IDLE_PCYC/                  [Kernel PMU event]
+  core_imc/CPM_1THRD_NON_IDLE_INST/                  [Kernel PMU event]
+  [...]
+  thread_imc/CPM_0THRD_NON_IDLE_PCYC/                [Kernel PMU event]
+  thread_imc/CPM_1THRD_NON_IDLE_INST/                [Kernel PMU event]
+
+To see per-chip data for a nest event such as nest_mcs01/PM_MCS01_64B_WR_DISP_PORT01/:
+
+.. code-block:: sh
+
+  # ./perf stat -e "nest_mcs01/PM_MCS01_64B_WR_DISP_PORT01/" -a --per-socket
+
+To see non-idle instructions for core 0:
+
+.. code-block:: sh
+
+  # ./perf stat -e "core_imc/CPM_NON_IDLE_INST/" -C 0 -I 1000
+
+To see non-idle instructions for a "make":
+
+.. code-block:: sh
+
+  # ./perf stat -e "thread_imc/CPM_NON_IDLE_PCYC/" make
+
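+Under the hood, the perf tool programs these PMUs through the perf_event_open(2)
+system call. The sketch below is illustrative only: ``EVENT_CONFIG`` is a
+placeholder (the real value is encoded from the event's sysfs ``events`` and
+``format`` entries), and error handling is minimal.
+
+.. code-block:: c
+
+  /* Illustrative sketch: count one core_imc event on CPU 0. */
+  #include <linux/perf_event.h>
+  #include <sys/ioctl.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  #define EVENT_CONFIG 0x0ULL  /* placeholder, not a real event offset */
+
+  int main(void)
+  {
+          struct perf_event_attr attr;
+          uint64_t count;
+          int type, fd;
+          FILE *f = fopen("/sys/bus/event_source/devices/core_imc/type", "r");
+
+          if (!f || fscanf(f, "%d", &type) != 1)
+                  return 1;
+          fclose(f);
+
+          memset(&attr, 0, sizeof(attr));
+          attr.size = sizeof(attr);
+          attr.type = type;            /* dynamically allocated PMU type */
+          attr.config = EVENT_CONFIG;
+          attr.disabled = 1;
+
+          /* System-wide counter on CPU 0: pid == -1, cpu == 0. */
+          fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
+          if (fd < 0)
+                  return 1;
+
+          ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+          sleep(1);
+          ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+          if (read(fd, &count, sizeof(count)) == sizeof(count))
+                  printf("count: %llu\n", (unsigned long long)count);
+          return 0;
+  }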
+
+IMC Trace-mode
+===============
+
+POWER9 supports two modes for IMC: Accumulation mode and Trace mode. In
+Accumulation mode, event counts are accumulated in system memory and the
+hypervisor then reads the posted counts periodically or when requested. In IMC
+Trace mode, the 64-bit trace SCOM value is initialized with the event
+information. The CPMCxSEL and CPMC_LOAD fields in the trace SCOM specify the
+event to be monitored and the sampling duration. On each overflow of CPMCxSEL,
+hardware snapshots the program counter along with the event counts and writes
+them into the memory pointed to by LDBAR.
+
+LDBAR is a 64-bit special purpose per-thread register; it has bits to indicate
+whether the hardware is configured for accumulation or trace mode.
+
+LDBAR Register Layout
+---------------------
+
+  +-------+----------------------+
+  | 0     | Enable/Disable       |
+  +-------+----------------------+
+  | 1     | 0: Accumulation Mode |
+  |       +----------------------+
+  |       | 1: Trace Mode        |
+  +-------+----------------------+
+  | 2:3   | Reserved             |
+  +-------+----------------------+
+  | 4-6   | PB scope             |
+  +-------+----------------------+
+  | 7     | Reserved             |
+  +-------+----------------------+
+  | 8:50  | Counter Address      |
+  +-------+----------------------+
+  | 51:63 | Reserved             |
+  +-------+----------------------+
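+
+The layout above uses IBM (MSB-first) bit numbering, i.e. bit 0 is the most
+significant bit of the 64-bit register. As a purely illustrative sketch, an
+LDBAR value for trace mode could be composed as below; the helper macros are
+local stand-ins, not the kernel's actual definitions.
+
+.. code-block:: c
+
+  /* Illustrative only: pack the LDBAR fields documented above. */
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PPC_BIT(b)        (1ULL << (63 - (b)))
+  #define PPC_BITMASK(s, e) ((PPC_BIT(s) - PPC_BIT(e)) + PPC_BIT(s))
+
+  #define LDBAR_ENABLE      PPC_BIT(0)         /* bit 0: enable           */
+  #define LDBAR_MODE_TRACE  PPC_BIT(1)         /* bit 1: 1 = trace mode   */
+  #define LDBAR_ADDR_MASK   PPC_BITMASK(8, 50) /* bits 8:50: counter addr */
+
+  static uint64_t ldbar_trace(uint64_t buf_addr)
+  {
+          /* Keep only the bits that fall inside the address field. */
+          return LDBAR_ENABLE | LDBAR_MODE_TRACE | (buf_addr & LDBAR_ADDR_MASK);
+  }
+
+  int main(void)
+  {
+          printf("LDBAR = 0x%016llx\n",
+                 (unsigned long long)ldbar_trace(0x200080000000ULL));
+          return 0;
+  }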
+
+TRACE_IMC_SCOM bit representation
+---------------------------------
+
+  +-------+------------+
+  | 0:1   | SAMPSEL    |
+  +-------+------------+
+  | 2:33  | CPMC_LOAD  |
+  +-------+------------+
+  | 34:40 | CPMC1SEL   |
+  +-------+------------+
+  | 41:47 | CPMC2SEL   |
+  +-------+------------+
+  | 48:50 | BUFFERSIZE |
+  +-------+------------+
+  | 51:63 | RESERVED   |
+  +-------+------------+
+
+CPMC_LOAD contains the sampling duration. SAMPSEL and CPMCxSEL determine the
+event to count. BUFFERSIZE indicates the memory range. On each overflow, the
+hardware snapshots the program counter along with the event counts, updates the
+memory and reloads the CPMC_LOAD value for the next sampling duration. IMC
+hardware does not support exceptions, so it quietly wraps around if the memory
+buffer reaches the end.
+
+*Currently the event monitored for trace-mode is fixed as cycle.*
+
+Trace IMC example usage
+=======================
+
+.. code-block:: sh
+
+  # perf list
+  [....]
+  trace_imc/trace_cycles/                            [Kernel PMU event]
+
+To record an application/process with trace-imc event:
+
+.. code-block:: sh
+
+  # perf record -e trace_imc/trace_cycles/ yes > /dev/null
+  [ perf record: Woken up 1 times to write data ]
+  [ perf record: Captured and wrote 0.012 MB perf.data (21 samples) ]
+
+The generated `perf.data` file can be read using perf report.
+
+Benefits of using IMC trace-mode
+================================
+
+PMI (Performance Monitoring Interrupt) handling is avoided, since IMC trace
+mode snapshots the program counter and updates the memory directly. This also
+provides a way for the operating system to do instruction sampling in real
+time without PMI processing overhead.
+
+Performance data using `perf top` with and without the trace-imc event:
+
+PMI interrupt counts when the `perf top` command is executed first without and
+then with the trace-imc event.
+
+.. code-block:: sh
+
+  # grep PMI /proc/interrupts
+  PMI:          0          0          0          0   Performance monitoring interrupts
+  # ./perf top
+  ...
+  # grep PMI /proc/interrupts
+  PMI:      39735       8710      17338      17801   Performance monitoring interrupts
+  # ./perf top -e trace_imc/trace_cycles/
+  ...
+  # grep PMI /proc/interrupts
+  PMI:      39735       8710      17338      17801   Performance monitoring interrupts
+
+
+That is, the PMI interrupt counts do not increment when using the `trace_imc` event.
index ba5edb3..0d45f0f 100644 (file)
@@ -18,9 +18,11 @@ powerpc
     elfnote
     firmware-assisted-dump
     hvcs
+    imc
     isa-versions
     kaslr-booke32
     mpc52xx
+    papr_hcalls
     pci_iov_resource_on_powernv
     pmu-ebb
     ptrace
diff --git a/Documentation/powerpc/papr_hcalls.rst b/Documentation/powerpc/papr_hcalls.rst
new file mode 100644 (file)
index 0000000..3493631
--- /dev/null
@@ -0,0 +1,250 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Hypercall Op-codes (hcalls)
+===========================
+
+Overview
+=========
+
+Virtualization on 64-bit Power Book3S Platforms is based on the PAPR
+specification [1]_ which describes the run-time environment for a guest
+operating system and how it should interact with the hypervisor for
+privileged operations. Currently there are two PAPR compliant hypervisors:
+
+- **IBM PowerVM (PHYP)**: IBM's proprietary hypervisor that supports AIX,
+  IBM-i and Linux as guests (termed Logical Partitions or LPARs). It
+  supports the full PAPR specification.
+
+- **Qemu/KVM**: Supports PPC64 Linux guests running on a PPC64 Linux host,
+  though it only implements a subset of the PAPR specification called LoPAPR [2]_.
+
+On the PPC64 arch, a guest kernel running on top of a PAPR hypervisor is called
+a *pSeries guest*. A pseries guest runs in supervisor mode (HV=0) and must
+issue hypercalls to the hypervisor whenever it needs to perform an action
+that is hypervisor privileged [3]_ or for other services managed by the
+hypervisor.
+
+Hence a hypercall (hcall) is essentially a request by the pseries guest
+asking the hypervisor to perform a privileged operation on its behalf. The
+guest issues an hcall with the necessary input operands. After performing the
+privileged operation, the hypervisor returns a status code and output operands
+back to the guest.
+
+HCALL ABI
+=========
+The ABI specification for an hcall between a pseries guest and the PAPR
+hypervisor is covered in section 14.5.3 of ref [2]_. The switch to the
+hypervisor context is done via the **HVCS** instruction, which expects the
+opcode for the hcall to be set in *r3* and any in-arguments for the hcall to be
+provided in registers *r4-r12*. If values have to be passed through a memory
+buffer, the data stored in that buffer should be in big-endian byte order.
+
+Once control returns to the guest after the hypervisor has serviced the
+'HVCS' instruction, the return value of the hcall is available in *r3* and any
+out values are returned in registers *r4-r12*. Again, as with the in-arguments,
+any out values stored in a memory buffer will be in big-endian byte order.
+
+The powerpc arch code provides convenient wrappers named **plpar_hcall_xxx**,
+defined in an arch-specific header [4]_, to issue hcalls from a Linux kernel
+running as a pseries guest.
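+
+As a minimal kernel-side sketch (the surrounding function is illustrative and
+not taken from any real driver), an hcall with no outputs and one with outputs
+returned through a buffer could be issued like this:
+
+.. code-block:: c
+
+  /* Illustrative sketch: issuing hcalls via the plpar_hcall wrappers. */
+  #include <asm/hvcall.h>
+  #include <linux/printk.h>
+
+  static void example_hcalls(unsigned long termno)
+  {
+          /* Out values from r4-r7 are copied into retbuf[0..3]. */
+          unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+          long rc;
+
+          /* No-output hcall: opcode in r3, status back in r3
+           * (H_CEDE yields the vCPU until the next interrupt). */
+          if (plpar_hcall_norets(H_CEDE) != H_SUCCESS)
+                  pr_warn("H_CEDE failed\n");
+
+          /* hcall with outputs: in-arguments follow the opcode. */
+          rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
+          if (rc == H_SUCCESS)
+                  pr_info("read %lu chars from vterm %lu\n", retbuf[0], termno);
+  }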
+
+Register Conventions
+====================
+
+Any hcall should follow the same register convention as described in section
+2.2.1.1 of the "64-Bit ELF V2 ABI Specification: Power Architecture" [5]_.
+The table below summarizes these conventions:
+
++----------+----------+-------------------------------------------+
+| Register |Volatile  |  Purpose                                  |
+| Range    |(Y/N)     |                                           |
++==========+==========+===========================================+
+|   r0     |    Y     |  Optional-usage                           |
++----------+----------+-------------------------------------------+
+|   r1     |    N     |  Stack Pointer                            |
++----------+----------+-------------------------------------------+
+|   r2     |    N     |  TOC                                      |
++----------+----------+-------------------------------------------+
+|   r3     |    Y     |  hcall opcode/return value                |
++----------+----------+-------------------------------------------+
+|  r4-r10  |    Y     |  in and out values                        |
++----------+----------+-------------------------------------------+
+|   r11    |    Y     |  Optional-usage/Environmental pointer     |
++----------+----------+-------------------------------------------+
+|   r12    |    Y     |  Optional-usage/Function entry address at |
+|          |          |  global entry point                       |
++----------+----------+-------------------------------------------+
+|   r13    |    N     |  Thread-Pointer                           |
++----------+----------+-------------------------------------------+
+|  r14-r31 |    N     |  Local Variables                          |
++----------+----------+-------------------------------------------+
+|    LR    |    Y     |  Link Register                            |
++----------+----------+-------------------------------------------+
+|   CTR    |    Y     |  Loop Counter                             |
++----------+----------+-------------------------------------------+
+|   XER    |    Y     |  Fixed-point exception register.          |
++----------+----------+-------------------------------------------+
+|  CR0-1   |    Y     |  Condition register fields.               |
++----------+----------+-------------------------------------------+
+|  CR2-4   |    N     |  Condition register fields.               |
++----------+----------+-------------------------------------------+
+|  CR5-7   |    Y     |  Condition register fields.               |
++----------+----------+-------------------------------------------+
+|  Others  |    N     |                                           |
++----------+----------+-------------------------------------------+
+
+DRC & DRC Indexes
+=================
+::
+
+     DR1                                  Guest
+     +--+        +------------+         +---------+
+     |  | <----> |            |         |  User   |
+     +--+  DRC1  |            |   DRC   |  Space  |
+                 |    PAPR    |  Index  +---------+
+     DR2         | Hypervisor |         |         |
+     +--+        |            | <-----> |  Kernel |
+     |  | <----> |            |  Hcall  |         |
+     +--+  DRC2  +------------+         +---------+
+
+The PAPR hypervisor terms shared hardware resources like PCI devices,
+NVDIMMs etc. that are available for use by LPARs as Dynamic Resources (DR).
+When a DR is allocated to an LPAR, PHYP creates a data structure called a
+Dynamic Resource Connector (DRC) to manage LPAR access. An LPAR refers to a
+DRC via an opaque 32-bit number called the DRC-Index. The DRC-Index value is
+provided to the LPAR via the device tree, where it is present as an
+attribute in the device tree node associated with the DR.
+
+HCALL Return-values
+===================
+
+After servicing the hcall, the hypervisor sets the return value in *r3*
+indicating success or failure of the hcall. In case of a failure, an error
+code indicates the cause of the error. These codes are defined and
+documented in the arch-specific header [4]_.
+
+In some cases a hcall can potentially take a long time and may need to be
+issued multiple times in order to be completely serviced. Such hcalls usually
+accept an opaque value *continue-token* within their argument list, and a
+return value of *H_CONTINUE* indicates that the hypervisor has not yet
+finished servicing the hcall.
+
+To make such hcalls, the guest needs to set *continue-token == 0* for the
+initial call and use the hypervisor-returned value of *continue-token* for
+each subsequent hcall, until the hypervisor returns a non *H_CONTINUE*
+return value.
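+
+A hedged sketch of this retry pattern is shown below. The opcode
+*H_EXAMPLE_LONG_OP* and its arguments are placeholders, the *H_CONTINUE*
+code is assumed to come from the arch-specific header [4]_, and which
+out-value carries the continue-token is specific to each hcall::
+
+  unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+  unsigned long token = 0;
+  long rc;
+
+  do {
+          /* continue-token is passed as the last in-argument here */
+          rc = plpar_hcall(H_EXAMPLE_LONG_OP, retbuf, arg1, arg2, token);
+          /* hypervisor hands back the token to use on the next retry */
+          token = retbuf[0];
+          cond_resched();
+  } while (rc == H_CONTINUE);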
+
+HCALL Op-codes
+==============
+
+Below is a partial list of HCALLs that are supported by PHYP. For the
+corresponding opcode values please look into the arch-specific header [4]_:
+
+**H_SCM_READ_METADATA**
+
+| Input: *drcIndex, offset, buffer-address, numBytesToRead*
+| Out: *numBytesRead*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_Hardware*
+
+Given a DRC Index of an NVDIMM, read N bytes from the metadata area
+associated with it, at the specified offset, and copy them to the provided
+buffer. The metadata area stores configuration information such as label
+information, bad-blocks etc. The metadata area is located out-of-band of the
+NVDIMM storage area, hence separate access semantics are provided.
+
+**H_SCM_WRITE_METADATA**
+
+| Input: *drcIndex, offset, data, numBytesToWrite*
+| Out: *None*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P4, H_Hardware*
+
+Given a DRC Index of an NVDIMM, write N-bytes to the metadata area
+associated with it, at the specified offset and from the provided buffer.
+
+**H_SCM_BIND_MEM**
+
+| Input: *drcIndex, startingScmBlockIndex, numScmBlocksToBind,*
+| *targetLogicalMemoryAddress, continue-token*
+| Out: *continue-token, targetLogicalMemoryAddress, numScmBlocksToBound*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_P4, H_Overlap,*
+| *H_Too_Big, H_P5, H_Busy*
+
+Given a DRC-Index of an NVDIMM, map a contiguous range of SCM blocks
+*(startingScmBlockIndex, startingScmBlockIndex+numScmBlocksToBind)* into the
+guest physical address space at *targetLogicalMemoryAddress*. In case
+*targetLogicalMemoryAddress == 0xFFFFFFFF_FFFFFFFF* the hypervisor assigns a
+target address to the guest. The HCALL can fail if the guest has an active
+PTE entry to the SCM block being bound.
+
+**H_SCM_UNBIND_MEM**
+
+| Input: *drcIndex, startingScmLogicalMemoryAddress, numScmBlocksToUnbind*
+| Out: *numScmBlocksUnbound*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_In_Use, H_Overlap,*
+| *H_Busy, H_LongBusyOrder1mSec, H_LongBusyOrder10mSec*
+
+Given a DRC-Index of an NVDIMM, unmap *numScmBlocksToUnbind* SCM blocks
+starting at *startingScmLogicalMemoryAddress* from the guest physical
+address space. The HCALL can fail if the guest has an active PTE entry to
+the SCM block being unbound.
+
+**H_SCM_QUERY_BLOCK_MEM_BINDING**
+
+| Input: *drcIndex, scmBlockIndex*
+| Out: *Guest-Physical-Address*
+| Return Value: *H_Success, H_Parameter, H_P2, H_NotFound*
+
+Given a DRC-Index and an SCM Block index, return the guest physical address
+to which the SCM block is mapped.
+
+**H_SCM_QUERY_LOGICAL_MEM_BINDING**
+
+| Input: *Guest-Physical-Address*
+| Out: *drcIndex, scmBlockIndex*
+| Return Value: *H_Success, H_Parameter, H_P2, H_NotFound*
+
+Given a guest physical address, return the DRC Index and SCM block index
+that are mapped to that address.
+
+**H_SCM_UNBIND_ALL**
+
+| Input: *scmTargetScope, drcIndex*
+| Out: *None*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_In_Use, H_Busy,*
+| *H_LongBusyOrder1mSec, H_LongBusyOrder10mSec*
+
+Depending on the target scope, unmap either all SCM blocks belonging to all
+NVDIMMs, or all SCM blocks belonging to a single NVDIMM identified by its
+drcIndex, from the LPAR memory.
+
+**H_SCM_HEALTH**
+
+| Input: *drcIndex*
+| Out: *health-bitmap, health-bit-valid-bitmap*
+| Return Value: *H_Success, H_Parameter, H_Hardware*
+
+Given a DRC Index, return info on predictive failure and overall health of
+the NVDIMM. Each asserted bit in the health-bitmap indicates a specific
+predictive failure, and the health-bit-valid-bitmap indicates which bits in
+the health-bitmap are valid.
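+
+For illustration only, a caller could combine the two bitmaps as sketched
+below (a fragment, not taken from any in-tree driver; the opcode and return
+code names are assumed to be available from the arch-specific header [4]_)::
+
+  unsigned long ret[PLPAR_HCALL_BUFSIZE];
+  long rc = plpar_hcall(H_SCM_HEALTH, ret, drc_index);
+
+  if (rc == H_SUCCESS) {
+          u64 health = ret[0];         /* health-bitmap */
+          u64 valid  = ret[1];         /* health-bit-valid-bitmap */
+          u64 flags  = health & valid; /* only bits marked valid matter */
+  }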
+
+**H_SCM_PERFORMANCE_STATS**
+
+| Input: *drcIndex, resultBuffer Addr*
+| Out: None
+| Return Value:  *H_Success, H_Parameter, H_Unsupported, H_Hardware, H_Authority, H_Privilege*
+
+Given a DRC Index, collect the performance statistics for the NVDIMM and
+copy them to the resultBuffer.
+
+References
+==========
+.. [1] "Power Architecture Platform Reference"
+       https://en.wikipedia.org/wiki/Power_Architecture_Platform_Reference
+.. [2] "Linux on Power Architecture Platform Reference"
+       https://members.openpowerfoundation.org/document/dl/469
+.. [3] "Definitions and Notation" Book III-Section 14.5.3
+       https://openpowerfoundation.org/?resource_lib=power-isa-version-3-0
+.. [4] arch/powerpc/include/asm/hvcall.h
+.. [5] "64-Bit ELF V2 ABI Specification: Power Architecture"
+       https://openpowerfoundation.org/?resource_lib=64-bit-elf-v2-abi-specification-power-architecture
diff --git a/Documentation/trace/boottime-trace.rst b/Documentation/trace/boottime-trace.rst
new file mode 100644 (file)
index 0000000..dcb3900
--- /dev/null
@@ -0,0 +1,184 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Boot-time tracing
+=================
+
+:Author: Masami Hiramatsu <mhiramat@kernel.org>
+
+Overview
+========
+
+Boot-time tracing allows users to trace the boot-time process, including
+device initialization, with the full features of ftrace, such as per-event
+filters and actions, histograms, kprobe-events, synthetic-events, and trace
+instances.
+Since the kernel command line is not enough to control these complex
+features, boot-time tracing uses a bootconfig file to describe the tracing
+configuration.
+
+Options in the Boot Config
+==========================
+
+Here is the list of available options for boot-time tracing in the boot
+config file [1]_. All options are under the "ftrace." or "kernel." prefix.
+See the kernel parameters documentation for the options which start with
+the "kernel." prefix [2]_.
+
+.. [1] See :ref:`Documentation/admin-guide/bootconfig.rst <bootconfig>`
+.. [2] See :ref:`Documentation/admin-guide/kernel-parameters.rst <kernelparameters>`
+
+Ftrace Global Options
+---------------------
+
+Ftrace global options have the "kernel." prefix in the boot config, which
+means these options are passed as part of the kernel's legacy command line.
+
+kernel.tp_printk
+   Output trace-event data to the printk buffer too.
+
+kernel.dump_on_oops [= MODE]
+   Dump ftrace on Oops. If MODE = 1 or omitted, dump the trace buffer
+   on all CPUs. If MODE = 2, dump the buffer only on the CPU which
+   triggered the Oops.
+
+kernel.traceoff_on_warning
+   Stop tracing if WARN_ON() occurs.
+
+kernel.fgraph_max_depth = MAX_DEPTH
+   Set MAX_DEPTH as the maximum depth of the fgraph tracer.
+
+kernel.fgraph_filters = FILTER[, FILTER2...]
+   Add fgraph tracing function filters.
+
+kernel.fgraph_notraces = FILTER[, FILTER2...]
+   Add fgraph non-tracing function filters.
+
+
+Ftrace Per-instance Options
+---------------------------
+
+These options can be used for each instance including global ftrace node.
+
+ftrace.[instance.INSTANCE.]options = OPT1[, OPT2[...]]
+   Enable given ftrace options.
+
+ftrace.[instance.INSTANCE.]trace_clock = CLOCK
+   Set given CLOCK to ftrace's trace_clock.
+
+ftrace.[instance.INSTANCE.]buffer_size = SIZE
+   Configure ftrace buffer size to SIZE. You can use "KB" or "MB"
+   for that SIZE.
+
+ftrace.[instance.INSTANCE.]alloc_snapshot
+   Allocate snapshot buffer.
+
+ftrace.[instance.INSTANCE.]cpumask = CPUMASK
+   Set CPUMASK as trace cpu-mask.
+
+ftrace.[instance.INSTANCE.]events = EVENT[, EVENT2[...]]
+   Enable given events on boot. You can use a wild card in EVENT.
+
+ftrace.[instance.INSTANCE.]tracer = TRACER
+   Set TRACER to current tracer on boot. (e.g. function)
+
+ftrace.[instance.INSTANCE.]ftrace.filters
+   This will take an array of tracing function filter rules.
+
+ftrace.[instance.INSTANCE.]ftrace.notraces
+   This will take an array of NON-tracing function filter rules.
+
+
+Ftrace Per-Event Options
+------------------------
+
+These options are setting per-event options.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.enable
+   Enable GROUP:EVENT tracing.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.filter = FILTER
+   Set FILTER rule to the GROUP:EVENT.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.actions = ACTION[, ACTION2[...]]
+   Set ACTIONs to the GROUP:EVENT.
+
+ftrace.[instance.INSTANCE.]event.kprobes.EVENT.probes = PROBE[, PROBE2[...]]
+   Defines a new kprobe event based on PROBEs. It is possible to define
+   multiple probes on one event, but they must have the same types of
+   arguments. This option is available only for events whose group
+   name is "kprobes".
+
+ftrace.[instance.INSTANCE.]event.synthetic.EVENT.fields = FIELD[, FIELD2[...]]
+   Defines a new synthetic event with FIELDs. Each field should be
+   "type varname".
+
+Note that kprobe and synthetic event definitions can be written under an
+instance node, but they are also visible from other instances. So please
+take care to avoid event name conflicts.
+
+
+Examples
+========
+
+For example, to add a filter and actions for each event, define kprobe
+events, and define synthetic events with a histogram, write a boot config
+like the one below::
+
+  ftrace.event {
+        task.task_newtask {
+                filter = "pid < 128"
+                enable
+        }
+        kprobes.vfs_read {
+                probes = "vfs_read $arg1 $arg2"
+                filter = "common_pid < 200"
+                enable
+        }
+        synthetic.initcall_latency {
+                fields = "unsigned long func", "u64 lat"
+                actions = "hist:keys=func.sym,lat:vals=lat:sort=lat"
+        }
+        initcall.initcall_start {
+                actions = "hist:keys=func:ts0=common_timestamp.usecs"
+        }
+        initcall.initcall_finish {
+                actions = "hist:keys=func:lat=common_timestamp.usecs-$ts0:onmatch(initcall.initcall_start).initcall_latency(func,$lat)"
+        }
+  }
+
+Also, boot-time tracing supports an "instance" node, which allows us to run
+several tracers for different purposes at once. For example, if one tracer
+is for tracing functions starting with "user\_" and another traces
+"kernel\_" functions, you can write a boot config as below::
+
+  ftrace.instance {
+        foo {
+                tracer = "function"
+                ftrace.filters = "user_*"
+        }
+        bar {
+                tracer = "function"
+                ftrace.filters = "kernel_*"
+        }
+  }
+
+The instance node also accepts event nodes so that each instance
+can customize its event tracing.
+
+This boot-time tracing also supports ftrace kernel parameters via the boot
+config. For example, the following kernel parameters::
+
+ trace_options=sym-addr trace_event=initcall:* tp_printk trace_buf_size=1M ftrace=function ftrace_filter="vfs*"
+
+can be written in a boot config like below::
+
+  kernel {
+        trace_options = sym-addr
+        trace_event = "initcall:*"
+        tp_printk
+        trace_buf_size = 1M
+        ftrace = function
+        ftrace_filter = "vfs*"
+  }
+
+Note that these parameters use the "kernel" prefix instead of the "ftrace"
+prefix.
index f7e1fcc..ed79b22 100644 (file)
@@ -525,3 +525,518 @@ The following commands are supported:
   event counts (hitcount).
 
   See Documentation/trace/histogram.rst for details and examples.
+
+6.3 In-kernel trace event API
+-----------------------------
+
+In most cases, the command-line interface to trace events is more than
+sufficient.  Sometimes, however, applications might find the need for
+more complex relationships than can be expressed through a simple
+series of linked command-line expressions, or putting together sets of
+commands may be simply too cumbersome.  An example might be an
+application that needs to 'listen' to the trace stream in order to
+maintain an in-kernel state machine detecting, for instance, when an
+illegal kernel state occurs in the scheduler.
+
+The trace event subsystem provides an in-kernel API allowing modules
+or other kernel code to generate user-defined 'synthetic' events at
+will, which can be used to either augment the existing trace stream
+and/or signal that a particular important state has occurred.
+
+A similar in-kernel API is also available for creating kprobe and
+kretprobe events.
+
+Both the synthetic event and k/ret/probe event APIs are built on top
+of a lower-level "dynevent_cmd" event command API, which is also
+available for more specialized applications, or as the basis of other
+higher-level trace event APIs.
+
+The API provided for these purposes is described below and allows the
+following:
+
+  - dynamically creating synthetic event definitions
+  - dynamically creating kprobe and kretprobe event definitions
+  - tracing synthetic events from in-kernel code
+  - the low-level "dynevent_cmd" API
+
+6.3.1 Dynamically creating synthetic event definitions
+-------------------------------------------------------
+
+There are a couple of ways to create a new synthetic event from a kernel
+module or other kernel code.
+
+The first creates the event in one step, using synth_event_create().
+In this method, the name of the event to create and an array defining
+the fields is supplied to synth_event_create().  If successful, a
+synthetic event with that name and fields will exist following that
+call.  For example, to create a new "schedtest" synthetic event:
+
+  ret = synth_event_create("schedtest", sched_fields,
+                           ARRAY_SIZE(sched_fields), THIS_MODULE);
+
+The sched_fields param in this example points to an array of struct
+synth_field_desc, each of which describes an event field by type and
+name:
+
+  static struct synth_field_desc sched_fields[] = {
+        { .type = "pid_t",              .name = "next_pid_field" },
+        { .type = "char[16]",           .name = "next_comm_field" },
+        { .type = "u64",                .name = "ts_ns" },
+        { .type = "u64",                .name = "ts_ms" },
+        { .type = "unsigned int",       .name = "cpu" },
+        { .type = "char[64]",           .name = "my_string_field" },
+        { .type = "int",                .name = "my_int_field" },
+  };
+
+See synth_field_size() for available types. If field_name contains [n]
+the field is considered to be an array.
+
+If the event is created from within a module, a pointer to the module
+must be passed to synth_event_create().  This will ensure that the
+trace buffer won't contain unreadable events when the module is
+removed.
+
+At this point, the event object is ready to be used for generating new
+events.
+
+In the second method, the event is created in several steps.  This
+allows events to be created dynamically and without the need to create
+and populate an array of fields beforehand.
+
+To use this method, an empty or partially empty synthetic event should
+first be created using synth_event_gen_cmd_start() or
+synth_event_gen_cmd_array_start().  For synth_event_gen_cmd_start(),
+the name of the event, along with one or more pairs of args, each pair
+representing a 'type field_name;' field specification, should be
+supplied.  For synth_event_gen_cmd_array_start(), the name of the
+event along with an array of struct synth_field_desc should be
+supplied. Before calling synth_event_gen_cmd_start() or
+synth_event_gen_cmd_array_start(), the user should create and
+initialize a dynevent_cmd object using synth_event_cmd_init().
+
+For example, to create a new "schedtest" synthetic event with two
+fields:
+
+  struct dynevent_cmd cmd;
+  char *buf;
+
+  /* Create a buffer to hold the generated command */
+  buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+  /* Before generating the command, initialize the cmd object */
+  synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+  ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
+                                  "pid_t", "next_pid_field",
+                                  "u64", "ts_ns");
+
+Alternatively, using an array of struct synth_field_desc fields
+containing the same information:
+
+  ret = synth_event_gen_cmd_array_start(&cmd, "schedtest", THIS_MODULE,
+                                        fields, n_fields);
+
+Once the synthetic event object has been created, it can then be
+populated with more fields.  Fields are added one by one using
+synth_event_add_field(), supplying the dynevent_cmd object, a field
+type, and a field name.  For example, to add a new int field named
+"intfield", the following call should be made:
+
+  ret = synth_event_add_field(&cmd, "int", "intfield");
+
+See synth_field_size() for available types. If field_name contains [n]
+the field is considered to be an array.
+
+A group of fields can also be added all at once using an array of
+synth_field_desc with synth_event_add_fields().  For example, this would add
+just the first four sched_fields:
+
+  ret = synth_event_add_fields(&cmd, sched_fields, 4);
+
+If you already have a string of the form 'type field_name',
+synth_event_add_field_str() can be used to add it as-is; it will
+also automatically append a ';' to the string.
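+
+For instance, an illustrative fragment (the field string here is made up)
+might look like:
+
+  ret = synth_event_add_field_str(&cmd, "u64 my_lat_field");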
+
+Once all the fields have been added, the event should be finalized and
+registered by calling the synth_event_gen_cmd_end() function:
+
+  ret = synth_event_gen_cmd_end(&cmd);
+
+At this point, the event object is ready to be used for tracing new
+events.
+
+6.3.3 Tracing synthetic events from in-kernel code
+--------------------------------------------------
+
+To trace a synthetic event, there are several options.  The first
+option is to trace the event in one call, using synth_event_trace()
+with a variable number of values, or synth_event_trace_array() with an
+array of values to be set.  A second option can be used to avoid the
+need for a pre-formed array of values or list of arguments, via
+synth_event_trace_start() and synth_event_trace_end() along with
+synth_event_add_next_val() or synth_event_add_val() to add the values
+piecewise.
+
+6.3.3.1 Tracing a synthetic event all at once
+---------------------------------------------
+
+To trace a synthetic event all at once, the synth_event_trace() or
+synth_event_trace_array() functions can be used.
+
+The synth_event_trace() function is passed the trace_event_file
+representing the synthetic event (which can be retrieved using
+trace_get_event_file() using the synthetic event name, "synthetic" as
+the system name, and the trace instance name (NULL if using the global
+trace array)), along with a variable number of u64 args, one for each
+synthetic event field, and the number of values being passed.
+
+So, to trace an event corresponding to the synthetic event definition
+above, code like the following could be used:
+
+  ret = synth_event_trace(create_synth_test, 7, /* number of values */
+                          444,             /* next_pid_field */
+                          (u64)"clackers", /* next_comm_field */
+                          1000000,         /* ts_ns */
+                          1000,            /* ts_ms */
+                          smp_processor_id(),/* cpu */
+                          (u64)"Thneed",   /* my_string_field */
+                          999);            /* my_int_field */
+
+All vals should be cast to u64, and string vals are just pointers to
+strings, cast to u64.  Strings will be copied into space reserved in
+the event for the string, using these pointers.
+
+Alternatively, the synth_event_trace_array() function can be used to
+accomplish the same thing.  It is passed the trace_event_file
+representing the synthetic event (which can be retrieved using
+trace_get_event_file() using the synthetic event name, "synthetic" as
+the system name, and the trace instance name (NULL if using the global
+trace array)), along with an array of u64, one for each synthetic
+event field.
+
+To trace an event corresponding to the synthetic event definition
+above, code like the following could be used:
+
+  u64 vals[7];
+
+  vals[0] = 777;                  /* next_pid_field */
+  vals[1] = (u64)"tiddlywinks";   /* next_comm_field */
+  vals[2] = 1000000;              /* ts_ns */
+  vals[3] = 1000;                 /* ts_ms */
+  vals[4] = smp_processor_id();   /* cpu */
+  vals[5] = (u64)"thneed";        /* my_string_field */
+  vals[6] = 398;                  /* my_int_field */
+
+The 'vals' array is just an array of u64, the number of which must
+match the number of fields in the synthetic event, and which must be in
+the same order as the synthetic event fields.
+
+All vals should be cast to u64, and string vals are just pointers to
+strings, cast to u64.  Strings will be copied into space reserved in
+the event for the string, using these pointers.
+
+In order to trace a synthetic event, a pointer to the trace event file
+is needed.  The trace_get_event_file() function can be used to get
+it - it will find the file in the given trace instance (in this case
+NULL since the top trace array is being used) while at the same time
+preventing the instance containing it from going away:
+
+       schedtest_event_file = trace_get_event_file(NULL, "synthetic",
+                                                   "schedtest");
+
+Before tracing the event, it should be enabled in some way, otherwise
+the synthetic event won't actually show up in the trace buffer.
+
+To enable a synthetic event from the kernel, trace_array_set_clr_event()
+can be used (which is not specific to synthetic events, so it does need
+the "synthetic" system name to be specified explicitly).
+
+To enable the event, pass 'true' to it:
+
+       trace_array_set_clr_event(schedtest_event_file->tr,
+                                 "synthetic", "schedtest", true);
+
+To disable it pass false:
+
+       trace_array_set_clr_event(schedtest_event_file->tr,
+                                 "synthetic", "schedtest", false);
+
+Finally, synth_event_trace_array() can be used to actually trace the
+event, which should be visible in the trace buffer afterwards:
+
+       ret = synth_event_trace_array(schedtest_event_file, vals,
+                                     ARRAY_SIZE(vals));
+
+To remove the synthetic event, the event should be disabled, and the
+trace instance should be 'put' back using trace_put_event_file():
+
+       trace_array_set_clr_event(schedtest_event_file->tr,
+                                 "synthetic", "schedtest", false);
+       trace_put_event_file(schedtest_event_file);
+
+If those have been successful, synth_event_delete() can be called to
+remove the event:
+
+       ret = synth_event_delete("schedtest");
+
+6.3.3.2 Tracing a synthetic event piecewise
+-------------------------------------------
+
+To trace a synthetic event using the piecewise method described above, the
+synth_event_trace_start() function is used to 'open' the synthetic
+event trace:
+
+       struct synth_trace_state trace_state;
+
+       ret = synth_event_trace_start(schedtest_event_file, &trace_state);
+
+It's passed the trace_event_file representing the synthetic event
+using the same methods as described above, along with a pointer to a
+struct synth_trace_state object, which will be zeroed before use and
+used to maintain state between this and following calls.
+
+Once the event has been opened, which means space for it has been
+reserved in the trace buffer, the individual fields can be set.  There
+are two ways to do that, either one after another for each field in
+the event, which requires no lookups, or by name, which does.  The
+tradeoff is flexibility in doing the assignments vs the cost of a
+lookup per field.
+
+To assign the values one after the other without lookups,
+synth_event_add_next_val() should be used.  Each call is passed the
+same synth_trace_state object used in the synth_event_trace_start(),
+along with the value to set the next field in the event.  After each
+field is set, the 'cursor' points to the next field, which will be set
+by the subsequent call, continuing until all the fields have been set
+in order.  The same sequence of calls as in the above examples using
+this method would be (without error-handling code):
+
+       /* next_pid_field */
+       ret = synth_event_add_next_val(777, &trace_state);
+
+       /* next_comm_field */
+       ret = synth_event_add_next_val((u64)"slinky", &trace_state);
+
+       /* ts_ns */
+       ret = synth_event_add_next_val(1000000, &trace_state);
+
+       /* ts_ms */
+       ret = synth_event_add_next_val(1000, &trace_state);
+
+       /* cpu */
+       ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
+
+       /* my_string_field */
+       ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
+
+       /* my_int_field */
+       ret = synth_event_add_next_val(395, &trace_state);
+
+To assign the values in any order, synth_event_add_val() should be
+used.  Each call is passed the same synth_trace_state object used in
+the synth_event_trace_start(), along with the field name of the field
+to set and the value to set it to.  The same sequence of calls as in
+the above examples using this method would be (without error-handling
+code):
+
+       ret = synth_event_add_val("next_pid_field", 777, &trace_state);
+       ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
+                                 &trace_state);
+       ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
+       ret = synth_event_add_val("ts_ms", 1000, &trace_state);
+       ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
+       ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
+                                 &trace_state);
+       ret = synth_event_add_val("my_int_field", 3999, &trace_state);
+
+Note that synth_event_add_next_val() and synth_event_add_val() are
+incompatible if used within the same trace of an event - either one
+can be used but not both at the same time.
+
+Finally, the event won't be actually traced until it's 'closed',
+which is done using synth_event_trace_end(), which takes only the
+struct synth_trace_state object used in the previous calls:
+
+       ret = synth_event_trace_end(&trace_state);
+
+Note that synth_event_trace_end() must be called at the end regardless
+of whether any of the add calls failed (say due to a bad field name
+being passed in).
+
+6.3.4 Dynamically creating kprobe and kretprobe event definitions
+------------------------------------------------------------------
+
+To create a kprobe or kretprobe trace event from kernel code, the
+kprobe_event_gen_cmd_start() or kretprobe_event_gen_cmd_start()
+functions can be used.
+
+To create a kprobe event, an empty or partially empty kprobe event
+should first be created using kprobe_event_gen_cmd_start().  The name
+of the event, the probe location, and one or more args, each
+representing a probe field, should be supplied to this function.
+Before calling kprobe_event_gen_cmd_start(), the user should create
+and initialize a dynevent_cmd object using kprobe_event_cmd_init().
+
+For example, to create a new "schedtest" kprobe event with two fields:
+
+  struct dynevent_cmd cmd;
+  char *buf;
+
+  /* Create a buffer to hold the generated command */
+  buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+  /* Before generating the command, initialize the cmd object */
+  kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+  /*
+   * Define the gen_kprobe_test event with the first 2 kprobe
+   * fields.
+   */
+  ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test", "do_sys_open",
+                                   "dfd=%ax", "filename=%dx");
+
+Once the kprobe event object has been created, it can then be
+populated with more fields.  Fields can be added using
+kprobe_event_add_fields(), supplying the dynevent_cmd object along
+with a variable arg list of probe fields.  For example, to add a
+couple of additional fields, the following call could be made:
+
+  ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
+
+Once all the fields have been added, the event should be finalized and
+registered by calling the kprobe_event_gen_cmd_end() or
+kretprobe_event_gen_cmd_end() functions, depending on whether a kprobe
+or kretprobe command was started:
+
+  ret = kprobe_event_gen_cmd_end(&cmd);
+
+or
+
+  ret = kretprobe_event_gen_cmd_end(&cmd);
+
+At this point, the event object is ready to be used for tracing new
+events.
+
+Similarly, a kretprobe event can be created using
+kretprobe_event_gen_cmd_start() with a probe name and location and
+additional params such as $retval:
+
+  ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
+                                      "do_sys_open", "$retval");
+
+Similar to the synthetic event case, code like the following can be
+used to enable the newly created kprobe event:
+
+  gen_kprobe_test = trace_get_event_file(NULL, "kprobes", "gen_kprobe_test");
+
+  ret = trace_array_set_clr_event(gen_kprobe_test->tr,
+                                  "kprobes", "gen_kprobe_test", true);
+
+Finally, also similar to synthetic events, the following code can be
+used to give the kprobe event file back and delete the event:
+
+  trace_put_event_file(gen_kprobe_test);
+
+  ret = kprobe_event_delete("gen_kprobe_test");
+
+6.3.5 The "dynevent_cmd" low-level API
+--------------------------------------
+
+Both the in-kernel synthetic event and kprobe interfaces are built on
+top of a lower-level "dynevent_cmd" interface.  This interface is
+meant to provide the basis for higher-level interfaces such as the
+synthetic and kprobe interfaces, which can be used as examples.
+
+The basic idea is simple and amounts to providing a general-purpose
+layer that can be used to generate trace event commands.  The
+generated command strings can then be passed to the command-parsing
+and event creation code that already exists in the trace event
+subsystem for creating the corresponding trace events.
+
+In a nutshell, the way it works is that the higher-level interface
+code creates a struct dynevent_cmd object, then uses a couple
+functions, dynevent_arg_add() and dynevent_arg_pair_add() to build up
+a command string, which finally causes the command to be executed
+using the dynevent_create() function.  The details of the interface
+are described below.
+
+The first step in building a new command string is to create and
+initialize an instance of a dynevent_cmd.  Here, for instance, we
+create a dynevent_cmd on the stack and initialize it:
+
+  struct dynevent_cmd cmd;
+  char *buf;
+  int ret;
+
+  buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+  dynevent_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN, DYNEVENT_TYPE_FOO,
+                    foo_event_run_command);
+
+The dynevent_cmd initialization needs to be given a user-specified
+buffer and the length of the buffer (MAX_DYNEVENT_CMD_LEN can be used
+for this purpose - at 2k it's generally too big to be comfortably put
+on the stack, so is dynamically allocated), a dynevent type id, which
+is meant to be used to check that further API calls are for the
+correct command type, and a pointer to an event-specific run_command()
+callback that will be called to actually execute the event-specific
+command function.
+
+Once that's done, the command string can be built up by successive
+calls to argument-adding functions.
+
+To add a single argument, define and initialize a struct dynevent_arg
+or struct dynevent_arg_pair object.  Here's an example of the simplest
+possible arg addition, which is simply to append the given string as
+a whitespace-separated argument to the command:
+
+  struct dynevent_arg arg;
+
+  dynevent_arg_init(&arg, NULL, 0);
+
+  arg.str = name;
+
+  ret = dynevent_arg_add(&cmd, &arg);
+
+The arg object is first initialized using dynevent_arg_init() and in
+this case the parameters are NULL or 0, which means there's no
+optional sanity-checking function or separator appended to the end of
+the arg.
+
+Here's another more complicated example using an 'arg pair', which is
+used to create an argument that consists of a couple of components added
+together as a unit, for example, a 'type field_name;' arg or a simple
+expression arg e.g. 'flags=%cx':
+
+  struct dynevent_arg_pair arg_pair;
+
+  dynevent_arg_pair_init(&arg_pair, dynevent_foo_check_arg_fn, 0, ';');
+
+  arg_pair.lhs = type;
+  arg_pair.rhs = name;
+
+  ret = dynevent_arg_pair_add(&cmd, &arg_pair);
+
+Again, the arg_pair is first initialized, in this case with a callback
+function used to check the sanity of the args (for example, that
+neither part of the pair is NULL), along with a character to be used
+to add an operator between the pair (here none) and a separator to be
+appended onto the end of the arg pair (here ';').
+
+There's also a dynevent_str_add() function that can be used to simply
+add a string as-is, with no spaces, delimiters, or arg check.
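+
+For example, a hypothetical fragment (the literal string here is made up)
+might append a trailing marker directly:
+
+  ret = dynevent_str_add(&cmd, ";");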
+
+Any number of dynevent_*_add() calls can be made to build up the string
+(until its length surpasses cmd->maxlen).  When all the arguments have
+been added and the command string is complete, the only thing left to
+do is run the command, which happens by simply calling
+dynevent_create():
+
+  ret = dynevent_create(&cmd);
+
+At that point, if the return value is 0, the dynamic event has been
+created and is ready to use.
+
+See the dynevent_cmd function definitions themselves for the details
+of the API.
index 04acd27..fa9e1c7 100644 (file)
@@ -19,6 +19,7 @@ Linux Tracing Technologies
    events-msr
    mmiotrace
    histogram
+   boottime-trace
    hwlat_detector
    intel_th
    stm
index 5599305..705d730 100644 (file)
@@ -97,6 +97,7 @@ which shows given pointer in "symbol+offset" style.
 For $comm, the default type is "string"; any other type is invalid.
 
 .. _user_mem_access:
+
 User Memory Access
 ------------------
 Kprobe events supports user-space memory access. For that purpose, you can use
index 1f77fb8..f1d8800 100644 (file)
@@ -7378,6 +7378,7 @@ F:        drivers/hwtracing/
 HARDWARE SPINLOCK CORE
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+R:     Baolin Wang <baolin.wang7@gmail.com>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git hwspinlock-next
@@ -7657,9 +7658,8 @@ S:        Orphan
 F:     drivers/net/usb/hso.c
 
 HSR NETWORK PROTOCOL
-M:     Arvid Brodin <arvid.brodin@alten.se>
 L:     netdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     net/hsr/
 
 HT16K33 LED CONTROLLER DRIVER
@@ -8908,8 +8908,10 @@ L:       isdn4linux@listserv.isdn4linux.de (subscribers-only)
 L:     netdev@vger.kernel.org
 W:     http://www.isdn4linux.de
 S:     Maintained
-F:     drivers/isdn/mISDN
-F:     drivers/isdn/hardware
+F:     drivers/isdn/mISDN/
+F:     drivers/isdn/hardware/
+F:     drivers/isdn/Kconfig
+F:     drivers/isdn/Makefile
 
 ISDN/CMTP OVER BLUETOOTH
 M:     Karsten Keil <isdn@linux-pingi.de>
@@ -14104,7 +14106,6 @@ F:      include/linux/platform_data/rtc-*
 F:     tools/testing/selftests/rtc/
 
 REALTEK AUDIO CODECS
-M:     Bard Liao <bardliao@realtek.com>
 M:     Oder Chiou <oder_chiou@realtek.com>
 S:     Maintained
 F:     sound/soc/codecs/rt*
@@ -14945,8 +14946,8 @@ S:      Maintained
 F:     drivers/mmc/host/sdhci-omap.c
 
 SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
-M:     Scott Bauer <scott.bauer@intel.com>
 M:     Jonathan Derrick <jonathan.derrick@intel.com>
+M:     Revanth Rajashekar <revanth.rajashekar@intel.com>
 L:     linux-block@vger.kernel.org
 S:     Supported
 F:     block/sed*
@@ -15932,6 +15933,15 @@ S:     Supported
 F:     Documentation/networking/device_drivers/stmicro/
 F:     drivers/net/ethernet/stmicro/stmmac/
 
+EXTRA BOOT CONFIG
+M:     Masami Hiramatsu <mhiramat@kernel.org>
+S:     Maintained
+F:     lib/bootconfig.c
+F:     fs/proc/bootconfig.c
+F:     include/linux/bootconfig.h
+F:     tools/bootconfig/*
+F:     Documentation/admin-guide/bootconfig.rst
+
 SUN3/3X
 M:     Sam Creasey <sammy@sammy.net>
 W:     http://sammy.net/sun3/
index 48b5e10..98de654 100644 (file)
@@ -393,18 +393,23 @@ config HAVE_ARCH_JUMP_LABEL
 config HAVE_ARCH_JUMP_LABEL_RELATIVE
        bool
 
-config HAVE_RCU_TABLE_FREE
+config MMU_GATHER_TABLE_FREE
        bool
 
-config HAVE_RCU_TABLE_NO_INVALIDATE
+config MMU_GATHER_RCU_TABLE_FREE
        bool
+       select MMU_GATHER_TABLE_FREE
 
-config HAVE_MMU_GATHER_PAGE_SIZE
+config MMU_GATHER_PAGE_SIZE
        bool
 
-config HAVE_MMU_GATHER_NO_GATHER
+config MMU_GATHER_NO_RANGE
        bool
 
+config MMU_GATHER_NO_GATHER
+       bool
+       depends on MMU_GATHER_TABLE_FREE
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
        bool
 
index 7268222..528d2be 100644 (file)
@@ -119,13 +119,12 @@ static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
        return res;
 }
 
-static const struct file_operations srm_env_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = srm_env_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = srm_env_proc_write,
+static const struct proc_ops srm_env_proc_ops = {
+       .proc_open      = srm_env_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = srm_env_proc_write,
 };
 
 static int __init
@@ -182,7 +181,7 @@ srm_env_init(void)
        entry = srm_named_entries;
        while (entry->name && entry->id) {
                if (!proc_create_data(entry->name, 0644, named_dir,
-                            &srm_env_proc_fops, (void *)entry->id))
+                            &srm_env_proc_ops, (void *)entry->id))
                        goto cleanup;
                entry++;
        }
@@ -194,7 +193,7 @@ srm_env_init(void)
                char name[4];
                sprintf(name, "%ld", var_num);
                if (!proc_create_data(name, 0644, numbered_dir,
-                            &srm_env_proc_fops, (void *)var_num))
+                            &srm_env_proc_ops, (void *)var_num))
                        goto cleanup;
        }
 
index 9019ed9..12be7e1 100644 (file)
@@ -273,6 +273,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_none(x)                    (!pmd_val(x))
 #define        pmd_bad(x)                      ((pmd_val(x) & ~PAGE_MASK))
 #define pmd_present(x)                 (pmd_val(x))
+#define pmd_leaf(x)                    (pmd_val(x) & _PAGE_HW_SZ)
 #define pmd_clear(xp)                  do { pmd_val(*(xp)) = 0; } while (0)
 
 #define pte_page(pte)          pfn_to_page(pte_pfn(pte))
index 0b1b1c6..97864aa 100644 (file)
@@ -74,7 +74,7 @@ config ARM
        select HAVE_CONTEXT_TRACKING
        select HAVE_COPY_THREAD_TLS
        select HAVE_C_RECORDMCOUNT
-       select HAVE_DEBUG_KMEMLEAK
+       select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
        select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
@@ -102,7 +102,7 @@ config ARM
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
-       select HAVE_RCU_TABLE_FREE if SMP && ARM_LPAE
+       select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
@@ -1905,7 +1905,7 @@ config XIP_DEFLATED_DATA
 config KEXEC
        bool "Kexec system call (EXPERIMENTAL)"
        depends on (!SMP || PM_SLEEP_SMP)
-       depends on !CPU_V7M
+       depends on MMU
        select KEXEC_CORE
        help
          kexec is a system call that implements the ability to shutdown your
index a1e883c..da599c3 100644 (file)
@@ -110,12 +110,12 @@ endif
 
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
-nossp_flags := $(call cc-option, -fno-stack-protector)
-CFLAGS_atags_to_fdt.o := $(nossp_flags)
-CFLAGS_fdt.o := $(nossp_flags)
-CFLAGS_fdt_ro.o := $(nossp_flags)
-CFLAGS_fdt_rw.o := $(nossp_flags)
-CFLAGS_fdt_wip.o := $(nossp_flags)
+nossp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
+CFLAGS_atags_to_fdt.o := $(nossp-flags-y)
+CFLAGS_fdt.o := $(nossp-flags-y)
+CFLAGS_fdt_ro.o := $(nossp-flags-y)
+CFLAGS_fdt_rw.o := $(nossp-flags-y)
+CFLAGS_fdt_wip.o := $(nossp-flags-y)
 
 ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
index ead21e5..088b0a0 100644 (file)
 #endif
                .endm
 
+               .macro  enable_cp15_barriers, reg
+               mrc     p15, 0, \reg, c1, c0, 0 @ read SCTLR
+               tst     \reg, #(1 << 5)         @ CP15BEN bit set?
+               bne     .L_\@
+               orr     \reg, \reg, #(1 << 5)   @ CP15 barrier instructions
+               mcr     p15, 0, \reg, c1, c0, 0 @ write SCTLR
+ ARM(          .inst   0xf57ff06f              @ v7+ isb       )
+ THUMB(                isb                                             )
+.L_\@:
+               .endm
+
                .section ".start", "ax"
 /*
  * sort out different calling conventions
@@ -820,6 +831,7 @@ __armv4_mmu_cache_on:
                mov     pc, r12
 
 __armv7_mmu_cache_on:
+               enable_cp15_barriers    r11
                mov     r12, lr
 #ifdef CONFIG_MMU
                mrc     p15, 0, r11, c0, c1, 4  @ read ID_MMFR0
@@ -1209,6 +1221,7 @@ __armv6_mmu_cache_flush:
                mov     pc, lr
 
 __armv7_mmu_cache_flush:
+               enable_cp15_barriers    r10
                tst     r4, #1
                bne     iflush
                mrc     p15, 0, r10, c0, c1, 5  @ read ID_MMFR1
@@ -1447,21 +1460,7 @@ ENTRY(efi_stub_entry)
 
                @ Preserve return value of efi_entry() in r4
                mov     r4, r0
-
-               @ our cache maintenance code relies on CP15 barrier instructions
-               @ but since we arrived here with the MMU and caches configured
-               @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
-               @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
-               @ the enable path will be executed on v7+ only.
-               mrc     p15, 0, r1, c1, c0, 0   @ read SCTLR
-               tst     r1, #(1 << 5)           @ CP15BEN bit set?
-               bne     0f
-               orr     r1, r1, #(1 << 5)       @ CP15 barrier instructions
-               mcr     p15, 0, r1, c1, c0, 0   @ write SCTLR
- ARM(          .inst   0xf57ff06f              @ v7+ isb       )
- THUMB(                isb                                             )
-
-0:             bl      cache_clean_flush
+               bl      cache_clean_flush
                bl      cache_off
 
                @ Set parameters for booting zImage according to boot protocol
index 82eeba8..23244b5 100644 (file)
@@ -4,7 +4,7 @@
  */
 
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/ti-dra7-atl.h>
+#include <dt-bindings/clock/ti-dra7-atl.h>
 #include <dt-bindings/input/input.h>
 
 / {
index 8641a3d..9eabfd1 100644 (file)
@@ -6,7 +6,7 @@
 
 #include "dra72x.dtsi"
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/ti-dra7-atl.h>
+#include <dt-bindings/clock/ti-dra7-atl.h>
 
 / {
        compatible = "ti,dra72-evm", "ti,dra722", "ti,dra72", "ti,dra7";
index 93e1eb8..ccf0fd4 100644 (file)
                };
        };
 
+       gpu_cm: gpu-cm@1200 {
+               compatible = "ti,omap4-cm";
+               reg = <0x1200 0x100>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0 0x1200 0x100>;
+
+               gpu_clkctrl: gpu-clkctrl@20 {
+                       compatible = "ti,clkctrl";
+                       reg = <0x20 0x4>;
+                       #clock-cells = <2>;
+               };
+       };
+
        l3init_cm: l3init-cm@1300 {
                compatible = "ti,omap4-cm";
                reg = <0x1300 0x100>;
index 51beec4..0d3ea35 100644 (file)
@@ -189,6 +189,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 }
 
 #define pmd_large(pmd)         (pmd_val(pmd) & 2)
+#define pmd_leaf(pmd)          (pmd_val(pmd) & 2)
 #define pmd_bad(pmd)           (pmd_val(pmd) & 2)
 #define pmd_present(pmd)       (pmd_val(pmd))
 
index 5b18295..ad55ab0 100644 (file)
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                                 PMD_TYPE_SECT)
 #define pmd_large(pmd)         pmd_sect(pmd)
+#define pmd_leaf(pmd)          pmd_sect(pmd)
 
 #define pud_clear(pudp)                        \
        do {                            \
index 010fa1a..30fb233 100644 (file)
 
 #define swapper_pg_dir ((pgd_t *) 0)
 
-#define __swp_type(x)          (0)
-#define __swp_offset(x)                (0)
-#define __swp_entry(typ,off)   ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
-
 
 typedef pte_t *pte_addr_t;
 
index 669474a..4d4e7b6 100644 (file)
@@ -37,10 +37,6 @@ static inline void __tlb_remove_table(void *_table)
 
 #include <asm-generic/tlb.h>
 
-#ifndef CONFIG_HAVE_RCU_TABLE_FREE
-#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
-#endif
-
 static inline void
 __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
 {
index 312cb89..4247ebf 100644 (file)
@@ -17,9 +17,9 @@ static ssize_t atags_read(struct file *file, char __user *buf,
        return simple_read_from_buffer(buf, count, ppos, b->data, b->size);
 }
 
-static const struct file_operations atags_fops = {
-       .read = atags_read,
-       .llseek = default_llseek,
+static const struct proc_ops atags_proc_ops = {
+       .proc_read      = atags_read,
+       .proc_lseek     = default_llseek,
 };
 
 #define BOOT_PARAMS_SIZE 1536
@@ -61,7 +61,7 @@ static int __init init_atags_procfs(void)
        b->size = size;
        memcpy(b->data, atags_copy, size);
 
-       tags_entry = proc_create_data("atags", 0400, NULL, &atags_fops, b);
+       tags_entry = proc_create_data("atags", 0400, NULL, &atags_proc_ops, b);
        if (!tags_entry)
                goto nomem;
 
index 71778bb..cc726af 100644 (file)
@@ -92,6 +92,8 @@ static int save_trace(struct stackframe *frame, void *d)
                return 0;
 
        regs = (struct pt_regs *)frame->sp;
+       if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
+               return 0;
 
        trace->entries[trace->nr_entries++] = regs->ARM_pc;
 
index abb7dd7..1e70e72 100644 (file)
@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
+       unsigned long end = frame + 4 + sizeof(struct pt_regs);
+
 #ifdef CONFIG_KALLSYMS
        printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
 
-       if (in_entry_text(from))
-               dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+       if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
+               dump_mem("", "Exception stack", frame + 4, end);
 }
 
 void dump_backtrace_stm(u32 *stack, u32 instruction)
index 788c5cf..84718ed 100644 (file)
@@ -162,12 +162,12 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
        return count;
 }
 
-static const struct file_operations alignment_proc_fops = {
-       .open           = alignment_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = alignment_proc_write,
+static const struct proc_ops alignment_proc_ops = {
+       .proc_open      = alignment_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = alignment_proc_write,
 };
 #endif /* CONFIG_PROC_FS */
 
@@ -1016,7 +1016,7 @@ static int __init alignment_init(void)
        struct proc_dir_entry *res;
 
        res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
-                         &alignment_proc_fops);
+                         &alignment_proc_ops);
        if (!res)
                return -ENOMEM;
 #endif
index e822af0..9414d72 100644 (file)
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static int __dma_supported(struct device *dev, u64 mask, bool warn)
 {
-       unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+       unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
 
        /*
         * Translate the device's DMA mask to a PFN limit.  This
index 3ef2041..054be44 100644 (file)
@@ -324,7 +324,7 @@ static inline void poison_init_mem(void *s, size_t count)
                *p++ = 0xe7fddef0;
 }
 
-static inline void
+static inline void __init
 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
        struct page *start_pg, *end_pg;
index de238b5..0b30e88 100644 (file)
@@ -104,6 +104,7 @@ config ARM64
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
        select GENERIC_PCI_IOMAP
+       select GENERIC_PTDUMP
        select GENERIC_SCHED_CLOCK
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
@@ -164,7 +165,7 @@ config ARM64
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_FUNCTION_ARG_ACCESS_API
        select HAVE_FUTEX_CMPXCHG if FUTEX
-       select HAVE_RCU_TABLE_FREE
+       select MMU_GATHER_RCU_TABLE_FREE
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
index cf09010..1c906d9 100644 (file)
@@ -1,22 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-config ARM64_PTDUMP_CORE
-       def_bool n
-
-config ARM64_PTDUMP_DEBUGFS
-       bool "Export kernel pagetable layout to userspace via debugfs"
-       depends on DEBUG_KERNEL
-       select ARM64_PTDUMP_CORE
-       select DEBUG_FS
-        help
-         Say Y here if you want to show the kernel pagetable layout in a
-         debugfs file. This information is only useful for kernel developers
-         who are working in architecture specific areas of the kernel.
-         It is probably not a good idea to enable this feature in a production
-         kernel.
-
-         If in doubt, say N.
-
 config PID_IN_CONTEXTIDR
        bool "Write the current PID to the CONTEXTIDR register"
        help
@@ -42,7 +25,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 
 config DEBUG_WX
        bool "Warn on W+X mappings at boot"
-       select ARM64_PTDUMP_CORE
+       select PTDUMP_CORE
        ---help---
          Generate a warning if any W+X mappings are found at boot.
 
index bd23f87..d3077c9 100644 (file)
@@ -3,7 +3,6 @@ generic-y += bugs.h
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += early_ioremap.h
 generic-y += emergency-restart.h
index cd5de0e..538c85e 100644 (file)
@@ -441,6 +441,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                 PMD_TYPE_TABLE)
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)
+#define pmd_leaf(pmd)          pmd_sect(pmd)
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
 static inline bool pud_sect(pud_t pud) { return false; }
@@ -525,6 +526,7 @@ static inline void pte_unmap(pte_t *pte) { }
 #define pud_none(pud)          (!pud_val(pud))
 #define pud_bad(pud)           (!(pud_val(pud) & PUD_TABLE_BIT))
 #define pud_present(pud)       pte_present(pud_pte(pud))
+#define pud_leaf(pud)          pud_sect(pud)
 #define pud_valid(pud)         pte_valid(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
index 0b8e726..38187f7 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef __ASM_PTDUMP_H
 #define __ASM_PTDUMP_H
 
-#ifdef CONFIG_ARM64_PTDUMP_CORE
+#ifdef CONFIG_PTDUMP_CORE
 
 #include <linux/mm_types.h>
 #include <linux/seq_file.h>
@@ -21,15 +21,15 @@ struct ptdump_info {
        unsigned long                   base_addr;
 };
 
-void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
-#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_PTDUMP_DEBUGFS
 void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
 #else
 static inline void ptdump_debugfs_register(struct ptdump_info *info,
                                           const char *name) { }
 #endif
 void ptdump_check_wx(void);
-#endif /* CONFIG_ARM64_PTDUMP_CORE */
+#endif /* CONFIG_PTDUMP_CORE */
 
 #ifdef CONFIG_DEBUG_WX
 #define debug_checkwx()        ptdump_check_wx()
index 849c1df..d91030f 100644 (file)
@@ -4,8 +4,8 @@ obj-y                           := dma-mapping.o extable.o fault.o init.o \
                                   ioremap.o mmap.o pgd.o mmu.o \
                                   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
-obj-$(CONFIG_ARM64_PTDUMP_CORE)        += dump.o
-obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS)     += ptdump_debugfs.o
+obj-$(CONFIG_PTDUMP_CORE)      += dump.o
+obj-$(CONFIG_PTDUMP_DEBUGFS)   += ptdump_debugfs.o
 obj-$(CONFIG_NUMA)             += numa.o
 obj-$(CONFIG_DEBUG_VIRTUAL)    += physaddr.o
 KASAN_SANITIZE_physaddr.o      += n
index 0a920b5..860c00e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/ptdump.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 
@@ -75,10 +76,11 @@ static struct addr_marker address_markers[] = {
  * dumps out a description of the range.
  */
 struct pg_state {
+       struct ptdump_state ptdump;
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
-       unsigned level;
+       int level;
        u64 current_prot;
        bool check_wx;
        unsigned long wx_pages;
@@ -174,11 +176,14 @@ struct pg_level {
 };
 
 static struct pg_level pg_level[] = {
-       {
-       }, { /* pgd */
+       { /* pgd */
                .name   = "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
+       }, { /* p4d */
+               .name   = "P4D",
+               .bits   = pte_bits,
+               .num    = ARRAY_SIZE(pte_bits),
        }, { /* pud */
                .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
                .bits   = pte_bits,
@@ -241,13 +246,17 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
 }
 
-static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
-                               u64 val)
+static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+                     unsigned long val)
 {
+       struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
        static const char units[] = "KMGTPE";
-       u64 prot = val & pg_level[level].mask;
+       u64 prot = 0;
+
+       if (level >= 0)
+               prot = val & pg_level[level].mask;
 
-       if (!st->level) {
+       if (st->level == -1) {
                st->level = level;
                st->current_prot = prot;
                st->start_address = addr;
@@ -260,21 +269,22 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
                if (st->current_prot) {
                        note_prot_uxn(st, addr);
                        note_prot_wx(st, addr);
-                       pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
+               }
+
+               pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
                                   st->start_address, addr);
 
-                       delta = (addr - st->start_address) >> 10;
-                       while (!(delta & 1023) && unit[1]) {
-                               delta >>= 10;
-                               unit++;
-                       }
-                       pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
-                                  pg_level[st->level].name);
-                       if (pg_level[st->level].bits)
-                               dump_prot(st, pg_level[st->level].bits,
-                                         pg_level[st->level].num);
-                       pt_dump_seq_puts(st->seq, "\n");
+               delta = (addr - st->start_address) >> 10;
+               while (!(delta & 1023) && unit[1]) {
+                       delta >>= 10;
+                       unit++;
                }
+               pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+                                  pg_level[st->level].name);
+               if (st->current_prot && pg_level[st->level].bits)
+                       dump_prot(st, pg_level[st->level].bits,
+                                 pg_level[st->level].num);
+               pt_dump_seq_puts(st->seq, "\n");
 
                if (addr >= st->marker[1].start_address) {
                        st->marker++;
@@ -293,85 +303,27 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
-                    unsigned long end)
-{
-       unsigned long addr = start;
-       pte_t *ptep = pte_offset_kernel(pmdp, start);
-
-       do {
-               note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
-       } while (ptep++, addr += PAGE_SIZE, addr != end);
-}
-
-static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
-                    unsigned long end)
-{
-       unsigned long next, addr = start;
-       pmd_t *pmdp = pmd_offset(pudp, start);
-
-       do {
-               pmd_t pmd = READ_ONCE(*pmdp);
-               next = pmd_addr_end(addr, end);
-
-               if (pmd_none(pmd) || pmd_sect(pmd)) {
-                       note_page(st, addr, 3, pmd_val(pmd));
-               } else {
-                       BUG_ON(pmd_bad(pmd));
-                       walk_pte(st, pmdp, addr, next);
-               }
-       } while (pmdp++, addr = next, addr != end);
-}
-
-static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
-                    unsigned long end)
+void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
 {
-       unsigned long next, addr = start;
-       pud_t *pudp = pud_offset(pgdp, start);
-
-       do {
-               pud_t pud = READ_ONCE(*pudp);
-               next = pud_addr_end(addr, end);
-
-               if (pud_none(pud) || pud_sect(pud)) {
-                       note_page(st, addr, 2, pud_val(pud));
-               } else {
-                       BUG_ON(pud_bad(pud));
-                       walk_pmd(st, pudp, addr, next);
-               }
-       } while (pudp++, addr = next, addr != end);
-}
+       unsigned long end = ~0UL;
+       struct pg_state st;
 
-static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
-                    unsigned long start)
-{
-       unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
-       unsigned long next, addr = start;
-       pgd_t *pgdp = pgd_offset(mm, start);
-
-       do {
-               pgd_t pgd = READ_ONCE(*pgdp);
-               next = pgd_addr_end(addr, end);
-
-               if (pgd_none(pgd)) {
-                       note_page(st, addr, 1, pgd_val(pgd));
-               } else {
-                       BUG_ON(pgd_bad(pgd));
-                       walk_pud(st, pgdp, addr, next);
-               }
-       } while (pgdp++, addr = next, addr != end);
-}
+       if (info->base_addr < TASK_SIZE_64)
+               end = TASK_SIZE_64;
 
-void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
-{
-       struct pg_state st = {
-               .seq = m,
+       st = (struct pg_state){
+               .seq = s,
                .marker = info->markers,
+               .ptdump = {
+                       .note_page = note_page,
+                       .range = (struct ptdump_range[]){
+                               {info->base_addr, end},
+                               {0, 0}
+                       }
+               }
        };
 
-       walk_pgd(&st, info->mm, info->base_addr);
-
-       note_page(&st, 0, 0, 0);
+       ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
 }
 
 static void ptdump_initialize(void)
@@ -398,11 +350,19 @@ void ptdump_check_wx(void)
                        { 0, NULL},
                        { -1, NULL},
                },
+               .level = -1,
                .check_wx = true,
+               .ptdump = {
+                       .note_page = note_page,
+                       .range = (struct ptdump_range[]) {
+                               {PAGE_OFFSET, ~0UL},
+                               {0, 0}
+                       }
+               }
        };
 
-       walk_pgd(&st, &init_mm, PAGE_OFFSET);
-       note_page(&st, 0, 0, 0);
+       ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
+
        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
                        st.wx_pages, st.uxn_pages);
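
After this conversion dump.c no longer walks the page tables itself: it supplies a note_page() callback plus a range list and lets the generic walker in mm/ptdump.c do the traversal (the final flush call arrives with level == -1, hence the checks above). A minimal sketch of the same interface for a hypothetical user that merely counts populated entries; count_state, count_note_page and count_kernel_entries are illustrative names, not part of this series:

#include <linux/mm.h>
#include <linux/ptdump.h>

struct count_state {
	struct ptdump_state ptdump;
	unsigned long entries;
};

static void count_note_page(struct ptdump_state *pt_st, unsigned long addr,
			    int level, unsigned long val)
{
	struct count_state *st = container_of(pt_st, struct count_state, ptdump);

	/* level -1 is the final flush call; skip it and skip empty entries. */
	if (level >= 0 && val)
		st->entries++;
}

static unsigned long count_kernel_entries(void)
{
	struct count_state st = {
		.ptdump = {
			.note_page = count_note_page,
			.range = (struct ptdump_range[]) {
				{PAGE_OFFSET, ~0UL},
				{0, 0},
			},
		},
	};

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	return st.entries;
}
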
index 40797cb..128f708 100644 (file)
@@ -943,13 +943,13 @@ int __init arch_ioremap_pud_supported(void)
         * SW table walks can't handle removal of intermediate entries.
         */
        return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
-              !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
+              !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
 int __init arch_ioremap_pmd_supported(void)
 {
        /* See arch_ioremap_pud_supported() */
-       return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
+       return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
index 064163f..1f2eae3 100644 (file)
@@ -7,7 +7,7 @@
 static int ptdump_show(struct seq_file *m, void *v)
 {
        struct ptdump_info *info = m->private;
-       ptdump_walk_pgd(m, info);
+       ptdump_walk(m, info);
        return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(ptdump);
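
ptdump_show() above leans on DEFINE_SHOW_ATTRIBUTE(), which generates the single_open() wrapper and the file_operations needed by debugfs from one _show() routine. For reference, a minimal sketch of the same idiom for a hypothetical debugfs file called "foo":

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example output\n");
	return 0;
}
/* Expands to foo_open() and a ready-to-use foo_fops. */
DEFINE_SHOW_ATTRIBUTE(foo);

static void foo_debugfs_register(void)
{
	debugfs_create_file("foo", 0400, NULL, NULL, &foo_fops);
}
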
index 4d4754e..bc15a26 100644 (file)
@@ -7,7 +7,6 @@ generic-y += delay.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += dma.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
index b392c0a..a25ab9b 100644 (file)
@@ -331,10 +331,10 @@ retry:
        return size;
 }
 
-static const struct file_operations salinfo_event_fops = {
-       .open  = salinfo_event_open,
-       .read  = salinfo_event_read,
-       .llseek = noop_llseek,
+static const struct proc_ops salinfo_event_proc_ops = {
+       .proc_open      = salinfo_event_open,
+       .proc_read      = salinfo_event_read,
+       .proc_lseek     = noop_llseek,
 };
 
 static int
@@ -534,12 +534,12 @@ salinfo_log_write(struct file *file, const char __user *buffer, size_t count, lo
        return count;
 }
 
-static const struct file_operations salinfo_data_fops = {
-       .open    = salinfo_log_open,
-       .release = salinfo_log_release,
-       .read    = salinfo_log_read,
-       .write   = salinfo_log_write,
-       .llseek  = default_llseek,
+static const struct proc_ops salinfo_data_proc_ops = {
+       .proc_open      = salinfo_log_open,
+       .proc_release   = salinfo_log_release,
+       .proc_read      = salinfo_log_read,
+       .proc_write     = salinfo_log_write,
+       .proc_lseek     = default_llseek,
 };
 
 static int salinfo_cpu_online(unsigned int cpu)
@@ -617,13 +617,13 @@ salinfo_init(void)
                        continue;
 
                entry = proc_create_data("event", S_IRUSR, dir,
-                                        &salinfo_event_fops, data);
+                                        &salinfo_event_proc_ops, data);
                if (!entry)
                        continue;
                *sdir++ = entry;
 
                entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir,
-                                        &salinfo_data_fops, data);
+                                        &salinfo_data_proc_ops, data);
                if (!entry)
                        continue;
                *sdir++ = entry;
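
The salinfo change is one instance of the tree-wide v5.6 move from struct file_operations to the slimmer struct proc_ops for procfs entries: each .xxx callback becomes .proc_xxx and the .owner field goes away. A minimal sketch of the pattern for a hypothetical read-only entry; "demo" and its callbacks are made-up names:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from proc_ops\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

/* Only the callbacks that are actually needed get filled in. */
static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init demo_init(void)
{
	return proc_create("demo", 0444, NULL, &demo_proc_ops) ? 0 : -ENOMEM;
}
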
index d5e683d..3a84f24 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5307=y
 CONFIG_AMCORE=y
index a3102ff..0ee3079 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 # CONFIG_4KSTACKS is not set
 CONFIG_RAMBASE=0x40000000
index f7bb9ed..f84f68c 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5249=y
 CONFIG_M5249C3=y
index 1e679f6..eca6502 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5272=y
 CONFIG_M5272C3=y
index d2987b4..9402c7a 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5275=y
 # CONFIG_4KSTACKS is not set
index 97a78c9..bb8b0eb 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5307=y
 CONFIG_M5307C3=y
index 766a97f..ce9ccf1 100644 (file)
@@ -11,8 +11,6 @@ CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5407=y
 CONFIG_M5407C3=y
index 579fd98..93f7c7a 100644 (file)
@@ -11,8 +11,6 @@ CONFIG_LOG_BUF_SHIFT=14
 CONFIG_EMBEDDED=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_COLDFIRE=y
 # CONFIG_4KSTACKS is not set
 CONFIG_RAMBASE=0x0
index 0134008..6bc80c3 100644 (file)
@@ -71,26 +71,29 @@ extern int __put_user_bad(void);
 #define get_user(x, ptr)                                       \
 ({                                                             \
     int __gu_err = 0;                                          \
-    typeof(x) __gu_val = 0;                                    \
     switch (sizeof(*(ptr))) {                                  \
     case 1:                                                    \
-       __get_user_asm(__gu_err, __gu_val, ptr, b, "=d");       \
+       __get_user_asm(__gu_err, x, ptr, b, "=d");              \
        break;                                                  \
     case 2:                                                    \
-       __get_user_asm(__gu_err, __gu_val, ptr, w, "=r");       \
+       __get_user_asm(__gu_err, x, ptr, w, "=r");              \
        break;                                                  \
     case 4:                                                    \
-       __get_user_asm(__gu_err, __gu_val, ptr, l, "=r");       \
+       __get_user_asm(__gu_err, x, ptr, l, "=r");              \
        break;                                                  \
-    case 8:                                                    \
-       memcpy((void *) &__gu_val, ptr, sizeof (*(ptr)));       \
+    case 8: {                                                  \
+       union {                                                 \
+           u64 l;                                              \
+           __typeof__(*(ptr)) t;                               \
+       } __gu_val;                                             \
+       memcpy(&__gu_val.l, ptr, sizeof(__gu_val.l));           \
+       (x) = __gu_val.t;                                       \
        break;                                                  \
+    }                                                          \
     default:                                                   \
-       __gu_val = 0;                                           \
        __gu_err = __get_user_bad();                            \
        break;                                                  \
     }                                                          \
-    (x) = (typeof(*(ptr))) __gu_val;                           \
     __gu_err;                                                  \
 })
 #define __get_user(x, ptr) get_user(x, ptr)
index 3b9cab8..857fa2a 100644 (file)
@@ -26,9 +26,9 @@ static ssize_t bootinfo_read(struct file *file, char __user *buf,
                                       bootinfo_size);
 }
 
-static const struct file_operations bootinfo_fops = {
-       .read = bootinfo_read,
-       .llseek = default_llseek,
+static const struct proc_ops bootinfo_proc_ops = {
+       .proc_read      = bootinfo_read,
+       .proc_lseek     = default_llseek,
 };
 
 void __init save_bootinfo(const struct bi_record *bi)
@@ -67,7 +67,7 @@ static int __init init_bootinfo_procfs(void)
        if (!bootinfo_copy)
                return -ENOMEM;
 
-       pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL);
+       pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_proc_ops, NULL);
        if (!pde) {
                kfree(bootinfo_copy);
                return -ENOMEM;
index a105f11..6a331bd 100644 (file)
@@ -27,6 +27,7 @@ config MICROBLAZE
        select HAVE_ARCH_HASH
        select HAVE_ARCH_KGDB
        select HAVE_DEBUG_KMEMLEAK
+       select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
index b3b433d..9b8a50f 100644 (file)
@@ -26,6 +26,7 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_EFI_PARTITION is not set
+CONFIG_CMA=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -73,7 +74,7 @@ CONFIG_FB_XILINX=y
 CONFIG_UIO=y
 CONFIG_UIO_PDRV_GENIRQ=y
 CONFIG_UIO_DMEM_GENIRQ=y
-CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_TMPFS=y
 CONFIG_CRAMFS=y
@@ -82,10 +83,11 @@ CONFIG_NFS_FS=y
 CONFIG_CIFS=y
 CONFIG_CIFS_STATS2=y
 CONFIG_ENCRYPTED_KEYS=y
+CONFIG_DMA_CMA=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SPINLOCK=y
 CONFIG_KGDB=y
 CONFIG_KGDB_TESTS=y
 CONFIG_KGDB_KDB=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_SPINLOCK=y
index 377de39..8c42078 100644 (file)
@@ -70,7 +70,7 @@ CONFIG_XILINX_WATCHDOG=y
 CONFIG_FB=y
 CONFIG_FB_XILINX=y
 # CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_CRAMFS=y
 CONFIG_ROMFS_FS=y
index 0bde47e..dcba538 100644 (file)
@@ -92,7 +92,8 @@ static inline void __disable_dcache_nomsr(void)
 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)   \
 do {                                                                   \
        int align = ~(cache_line_length - 1);                           \
-       end = min(start + cache_size, end);                             \
+       if (start <  UINT_MAX - cache_size)                             \
+               end = min(start + cache_size, end);                     \
        start &= align;                                                 \
 } while (0)
 
index ef2f494..cd9b445 100644 (file)
@@ -51,6 +51,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
        {"9.5", 0x22},
        {"9.6", 0x23},
        {"10.0", 0x24},
+       {"11.0", 0x25},
        {NULL, 0},
 };
 
index 7d28944..14b2764 100644 (file)
@@ -121,10 +121,10 @@ no_fdt_arg:
        tophys(r4,r4)                   /* convert to phys address */
        ori     r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
-       /* r2=r5+r6 - r5 contain pointer to command line */
+       /* r2=r5+r11 - r5 contain pointer to command line */
        lbu     r2, r5, r11
        beqid   r2, skip                /* Skip if no data */
-       sb      r2, r4, r11             /* addr[r4+r6]= r2 */
+       sb      r2, r4, r11             /* addr[r4+r11]= r2 */
        addik   r11, r11, 1             /* increment counting */
        bgtid   r3, _copy_command_line  /* loop for all entries       */
        addik   r3, r3, -1              /* decrement loop */
@@ -139,8 +139,8 @@ skip:
        ori     r4, r0, TOPHYS(_bram_load_start)        /* save bram context */
        ori     r3, r0, (LMB_SIZE - 4)
 _copy_bram:
-       lw      r7, r0, r11             /* r7 = r0 + r6 */
-       sw      r7, r4, r11             /* addr[r4 + r6] = r7 */
+       lw      r7, r0, r11             /* r7 = r0 + r11 */
+       sw      r7, r4, r11             /* addr[r4 + r11] = r7 */
        addik   r11, r11, 4             /* increment counting */
        bgtid   r3, _copy_bram          /* loop for all entries */
        addik   r3, r3, -4              /* descrement loop */
index 050fc62..1056f16 100644 (file)
@@ -7,6 +7,7 @@
  * for more details.
  */
 
+#include <linux/dma-contiguous.h>
 #include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -345,6 +346,9 @@ asmlinkage void __init mmu_init(void)
        /* This will also cause that unflatten device tree will be allocated
         * inside 768MB limit */
        memblock_set_current_limit(memory_start + lowmem_size - 1);
+
+       /* CMA initialization */
+       dma_contiguous_reserve(memory_start + lowmem_size - 1);
 }
 
 /* This is only called until mem_init is done. */
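
With HAVE_DMA_CONTIGUOUS selected and dma_contiguous_reserve() called from mmu_init(), microblaze can carve out a CMA area at boot; once CONFIG_DMA_CMA is enabled (as in the defconfig change above), large coherent allocations can be backed by it through the ordinary DMA API. An illustrative driver-side fragment, with a hypothetical device and buffer size:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void *alloc_big_dma_buffer(struct device *dev, dma_addr_t *dma)
{
	/* With CONFIG_DMA_CMA, an allocation this large is typically
	 * satisfied from the reserved CMA area. */
	return dma_alloc_coherent(dev, SZ_4M, dma, GFP_KERNEL);
}
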
index 16d1eb4..4ebd8ce 100644 (file)
@@ -6,7 +6,6 @@ generated-y += syscall_table_64_n64.h
 generated-y += syscall_table_64_o32.h
 generic-y += current.h
 generic-y += device.h
-generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
 generic-y += export.h
 generic-y += irq_work.h
index 91b89aa..aef5378 100644 (file)
@@ -639,6 +639,11 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifdef _PAGE_HUGE
+#define pmd_leaf(pmd)  ((pmd_val(pmd) & _PAGE_HUGE) != 0)
+#define pud_leaf(pud)  ((pud_val(pud) & _PAGE_HUGE) != 0)
+#endif
+
 #define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
 
 #include <asm-generic/pgtable.h>
index 8126f15..61c0334 100644 (file)
@@ -89,13 +89,12 @@ static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations pvc_line_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = pvc_line_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = pvc_line_proc_write,
+static const struct proc_ops pvc_line_proc_ops = {
+       .proc_open      = pvc_line_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = pvc_line_proc_write,
 };
 
 static ssize_t pvc_scroll_proc_write(struct file *file, const char __user *buf,
@@ -148,13 +147,12 @@ static int pvc_scroll_proc_open(struct inode *inode, struct file *file)
        return single_open(file, pvc_scroll_proc_show, NULL);
 }
 
-static const struct file_operations pvc_scroll_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = pvc_scroll_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = pvc_scroll_proc_write,
+static const struct proc_ops pvc_scroll_proc_ops = {
+       .proc_open      = pvc_scroll_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = pvc_scroll_proc_write,
 };
 
 void pvc_proc_timerfunc(struct timer_list *unused)
@@ -189,12 +187,11 @@ static int __init pvc_proc_init(void)
        }
        for (i = 0; i < PVC_NLINES; i++) {
                proc_entry = proc_create_data(pvc_linename[i], 0644, dir,
-                                       &pvc_line_proc_fops, &pvc_linedata[i]);
+                                       &pvc_line_proc_ops, &pvc_linedata[i]);
                if (proc_entry == NULL)
                        goto error;
        }
-       proc_entry = proc_create("scroll", 0644, dir,
-                                &pvc_scroll_proc_fops);
+       proc_entry = proc_create("scroll", 0644, dir, &pvc_scroll_proc_ops);
        if (proc_entry == NULL)
                goto error;
 
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
deleted file mode 100644 (file)
index d3e3d94..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PA7100LC=y
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_GSC_LASI=y
-# CONFIG_PDC_CHASSIS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_DIAG=m
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-CONFIG_LLC2=m
-CONFIG_NET_PKTGEN=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_ATA_OVER_ETH=m
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_LASI700=y
-CONFIG_SCSI_DEBUG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_LASI_82596=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_KEYBOARD_HIL_OLD is not set
-CONFIG_MOUSE_SERIAL=m
-CONFIG_LEGACY_PTY_COUNT=64
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=17
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_MUX is not set
-CONFIG_PDC_CONSOLE=y
-CONFIG_PRINTER=m
-CONFIG_PPDEV=m
-# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=128
-CONFIG_DUMMY_CONSOLE_ROWS=48
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_HARMONY=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_HW is not set
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
deleted file mode 100644 (file)
index 3335734..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PA8X00=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-# CONFIG_GSC is not set
-CONFIG_PCI=y
-CONFIG_PCI_LBA=y
-CONFIG_PCCARD=m
-# CONFIG_PCMCIA_LOAD_CIS is not set
-CONFIG_YENTA=m
-CONFIG_PD6729=m
-CONFIG_I82092=m
-# CONFIG_SUPERIO is not set
-# CONFIG_CHASSIS_LCD_LED is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
-CONFIG_LLC2=m
-CONFIG_NET_PKTGEN=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_DEBUG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_CTL=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_3C589=m
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-CONFIG_ACENIC=m
-CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_PCNET32=m
-CONFIG_TIGON3=m
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=y
-CONFIG_TULIP_MMIO=y
-CONFIG_PCMCIA_XIRCOM=m
-CONFIG_HP100=m
-CONFIG_E100=m
-CONFIG_E1000=m
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_PCMCIA_XIRC2PS=m
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_CS=m
-CONFIG_SERIAL_8250_NR_UARTS=17
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_PDC_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
-# CONFIG_HWMON is not set
-CONFIG_AGP=y
-CONFIG_AGP_PARISC=y
-# CONFIG_STI_CONSOLE is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V4=m
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_BLOWFISH=m
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
deleted file mode 100644 (file)
index 07fde5b..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODVERSIONS=y
-CONFIG_PA7100LC=y
-CONFIG_HPPB=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_GSC_LASI=y
-CONFIG_GSC_WAX=y
-CONFIG_EISA=y
-CONFIG_ISA=y
-CONFIG_PCI=y
-CONFIG_GSC_DINO=y
-# CONFIG_PDC_CHASSIS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LASI700=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
-CONFIG_SCSI_ZALON=y
-CONFIG_SCSI_NCR53C8XX_SYNC=40
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=y
-CONFIG_LASI_82596=y
-CONFIG_PPP=y
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_HIL_OLD is not set
-CONFIG_INPUT_MISC=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=13
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_PRINTER=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_HARMONY=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_SECURITY=y
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
deleted file mode 100644 (file)
index 64d45a8..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PA8X00=y
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_GSC is not set
-CONFIG_PCI=y
-CONFIG_PCI_LBA=y
-# CONFIG_PDC_CHASSIS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_DIAG is not set
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETFILTER=y
-CONFIG_NET_PKTGEN=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_NS87415=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
-CONFIG_SCSI_DEBUG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_CTL=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_ACENIC=m
-CONFIG_TIGON3=m
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=y
-CONFIG_TULIP_MMIO=y
-CONFIG_E100=m
-CONFIG_E1000=m
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_MOUSE_PS2 is not set
-CONFIG_SERIO=m
-CONFIG_SERIO_LIBPS2=m
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=13
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_AD1889=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_PRINTER=m
-CONFIG_USB_STORAGE=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_MDC800=m
-CONFIG_USB_MICROTEK=m
-CONFIG_USB_LEGOTOWER=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MUTEXES=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_MD5=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
deleted file mode 100644 (file)
index db864b1..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_PA8X00=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_PREEMPT=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_PCI=y
-CONFIG_PCI_LBA=y
-# CONFIG_SUPERIO is not set
-# CONFIG_CHASSIS_LCD_LED is not set
-# CONFIG_PDC_CHASSIS is not set
-# CONFIG_PDC_CHASSIS_WARN is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=m
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-# CONFIG_IPV6 is not set
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
-CONFIG_TIPC=m
-CONFIG_LLC2=m
-CONFIG_DNS_RESOLVER=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=y
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_WCACHE=y
-CONFIG_ATA_OVER_ETH=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-CONFIG_ISCSI_TCP=m
-CONFIG_ISCSI_BOOT_SYSFS=m
-CONFIG_ATA=y
-CONFIG_PATA_SIL680=y
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=y
-CONFIG_FUSION_SAS=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_NETCONSOLE=m
-CONFIG_TUN=y
-CONFIG_E1000=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_WLAN is not set
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_HIL_OLD is not set
-# CONFIG_KEYBOARD_HIL is not set
-# CONFIG_MOUSE_PS2 is not set
-CONFIG_INPUT_MISC=y
-CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_PARKBD=m
-CONFIG_SERIO_GSCPS2=m
-# CONFIG_HP_SDC is not set
-CONFIG_SERIO_PCIPS2=m
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=m
-CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=8
-CONFIG_SERIAL_8250_RUNTIME_UARTS=8
-CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_MUX is not set
-CONFIG_SERIAL_JSM=m
-CONFIG_PRINTER=y
-CONFIG_HW_RANDOM=y
-CONFIG_RAW_DRIVER=m
-CONFIG_PTP_1588_CLOCK=y
-CONFIG_SSB=m
-CONFIG_SSB_DRIVER_PCICORE=y
-CONFIG_AGP=y
-CONFIG_AGP_PARISC=y
-CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_FOREIGN_ENDIAN=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-# CONFIG_FB_STI is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_GENERIC is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_STI_CONSOLE is not set
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_VERBOSE_PRINTK=y
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_AD1889=m
-# CONFIG_SND_USB is not set
-# CONFIG_SND_GSC is not set
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_NFS_FS=m
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
-CONFIG_LATENCYTOP=y
-CONFIG_KEYS=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_FONTS=y
diff --git a/arch/parisc/configs/defconfig b/arch/parisc/configs/defconfig
deleted file mode 100644 (file)
index 5b877ca..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PA7100LC=y
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_GSC_LASI=y
-CONFIG_GSC_WAX=y
-CONFIG_EISA=y
-CONFIG_PCI=y
-CONFIG_GSC_DINO=y
-CONFIG_PCI_LBA=y
-CONFIG_PCCARD=y
-CONFIG_YENTA=y
-CONFIG_PD6729=y
-CONFIG_I82092=y
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_DIAG=m
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_LLC2=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_PC_PCMCIA=m
-CONFIG_PARPORT_1284=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_NS87415=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LASI700=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_ZALON=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_MD_RAID10=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_ACENIC=y
-CONFIG_TIGON3=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=y
-CONFIG_LASI_82596=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_KEYBOARD_HIL_OLD is not set
-CONFIG_MOUSE_SERIAL=y
-CONFIG_LEGACY_PTY_COUNT=64
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_CS=y
-CONFIG_SERIAL_8250_NR_UARTS=17
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_PRINTER=m
-CONFIG_PPDEV=m
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=128
-CONFIG_DUMMY_CONSOLE_ROWS=48
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_AD1889=y
-CONFIG_SND_HARMONY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_TOPSEED=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_UHCI_HCD=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_VFAT_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_KEYS=y
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
-CONFIG_FONTS=y
index c7a5726..61bac8f 100644 (file)
@@ -10,23 +10,20 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_PERF_EVENTS=y
 CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PA7100LC=y
 CONFIG_SMP=y
 CONFIG_HZ_100=y
 CONFIG_IOMMU_CCIO=y
 CONFIG_GSC_LASI=y
 CONFIG_GSC_WAX=y
-CONFIG_EISA=y
-CONFIG_PCI=y
 CONFIG_GSC_DINO=y
 CONFIG_PCI_LBA=y
-CONFIG_PCCARD=m
-CONFIG_YENTA=m
 # CONFIG_PDC_CHASSIS is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_UNUSED_SYMBOLS=y
+# CONFIG_BLK_DEV_BSG is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_NET=y
@@ -35,17 +32,15 @@ CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
 CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 CONFIG_INET_DIAG=m
 CONFIG_LLC2=m
 # CONFIG_WIRELESS is not set
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
@@ -88,7 +83,6 @@ CONFIG_TUN=m
 # CONFIG_NET_VENDOR_ALTEON is not set
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
@@ -97,8 +91,6 @@ CONFIG_NET_TULIP=y
 CONFIG_TULIP=y
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
-# CONFIG_NET_VENDOR_HP is not set
 CONFIG_LASI_82596=y
 # CONFIG_NET_VENDOR_MELLANOX is not set
 # CONFIG_NET_VENDOR_MICREL is not set
@@ -106,10 +98,9 @@ CONFIG_LASI_82596=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SILAN is not set
 # CONFIG_NET_VENDOR_SIS is not set
@@ -142,7 +133,6 @@ CONFIG_PPDEV=m
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 # CONFIG_HWMON is not set
-CONFIG_AGP=y
 CONFIG_FB=y
 CONFIG_FB_FOREIGN_ENDIAN=y
 CONFIG_FB_MODE_HELPERS=y
@@ -230,63 +220,11 @@ CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
 # CONFIG_CIFS_DEBUG is not set
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
-CONFIG_LKDTM=m
-CONFIG_KEYS=y
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -302,3 +240,14 @@ CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_FONTS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_LATENCYTOP=y
+CONFIG_LKDTM=m
index d39e7f8..59561e0 100644 (file)
@@ -17,27 +17,24 @@ CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_BLK_DEV_INTEGRITY=y
-# CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_PA8X00=y
 CONFIG_64BIT=y
 CONFIG_SMP=y
-# CONFIG_COMPACTION is not set
 CONFIG_HPPB=y
 CONFIG_IOMMU_CCIO=y
 CONFIG_GSC_LASI=y
 CONFIG_GSC_WAX=y
-CONFIG_PCI=y
-CONFIG_PCI_STUB=m
-CONFIG_PCI_IOV=y
 CONFIG_GSC_DINO=y
 CONFIG_PCI_LBA=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -47,18 +44,17 @@ CONFIG_XFRM_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_ADVANCED is not set
 CONFIG_NETFILTER_NETLINK_LOG=y
 CONFIG_DCB=y
 # CONFIG_WIRELESS is not set
+CONFIG_PCI=y
+CONFIG_PCI_STUB=m
+CONFIG_PCI_IOV=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
@@ -102,7 +98,6 @@ CONFIG_TUN=y
 # CONFIG_NET_VENDOR_ALTEON is not set
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
@@ -111,10 +106,8 @@ CONFIG_NET_TULIP=y
 CONFIG_TULIP=y
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
-CONFIG_HP100=m
-CONFIG_E1000=y
 CONFIG_LASI_82596=y
+CONFIG_E1000=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MELLANOX is not set
 # CONFIG_NET_VENDOR_MICREL is not set
@@ -124,9 +117,8 @@ CONFIG_LASI_82596=y
 # CONFIG_NET_VENDOR_OKI is not set
 CONFIG_QLA3XXX=m
 CONFIG_QLCNIC=m
-CONFIG_QLGE=m
-# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SILAN is not set
 # CONFIG_NET_VENDOR_SIS is not set
@@ -153,9 +145,6 @@ CONFIG_SMSC_PHY=m
 CONFIG_STE10XP=m
 CONFIG_VITESSE_PHY=m
 CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_HIL_OLD is not set
@@ -208,34 +197,18 @@ CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
 CONFIG_HIDRAW=y
 CONFIG_HID_PID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_MON=m
-CONFIG_USB_WUSB_CBAF=m
-CONFIG_USB_XHCI_HCD=m
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_ONESHOT=y
-CONFIG_LEDS_TRIGGER_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_UIO=y
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_AEC=m
 CONFIG_UIO_SERCOS3=m
 CONFIG_UIO_PCI_GENERIC=m
 CONFIG_STAGING=y
+CONFIG_QLGE=m
+CONFIG_HP100=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_SECURITY=y
@@ -272,14 +245,6 @@ CONFIG_NLS_ASCII=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_ISO8859_2=m
 CONFIG_NLS_UTF8=m
-CONFIG_PRINTK_TIME=y
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_SCHED_DEBUG is not set
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
@@ -292,3 +257,10 @@ CONFIG_CRYPTO_DEFLATE=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=m
 CONFIG_LIBCRC32C=y
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_SCHED_DEBUG is not set
index 354cf06..5224fb3 100644 (file)
@@ -351,7 +351,6 @@ static void __init map_pages(unsigned long start_vaddr,
                             unsigned long start_paddr, unsigned long size,
                             pgprot_t pgprot, int force)
 {
-       pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long end_paddr;
@@ -372,62 +371,37 @@ static void __init map_pages(unsigned long start_vaddr,
 
        end_paddr = start_paddr + size;
 
-       pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-       start_pmd = 0;
-#else
+       /* for a 2-level configuration PTRS_PER_PMD is 1, so start_pmd will be 0 */
        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 
        address = start_paddr;
        vaddr = start_vaddr;
        while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-               pmd = (pmd_t *)__pa(pg_dir);
-#else
-               pmd = (pmd_t *)pgd_address(*pg_dir);
-
-               /*
-                * pmd is physical at this point
-                */
+               pgd_t *pgd = pgd_offset_k(vaddr);
+               p4d_t *p4d = p4d_offset(pgd, vaddr);
+               pud_t *pud = pud_offset(p4d, vaddr);
 
-               if (!pmd) {
+#if CONFIG_PGTABLE_LEVELS == 3
+               if (pud_none(*pud)) {
                        pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
                                             PAGE_SIZE << PMD_ORDER);
                        if (!pmd)
                                panic("pmd allocation failed.\n");
-                       pmd = (pmd_t *) __pa(pmd);
+                       pud_populate(NULL, pud, pmd);
                }
-
-               pud_populate(NULL, (pud_t *)pg_dir, __va(pmd));
 #endif
-               pg_dir++;
-
-               /* now change pmd to kernel virtual addresses */
 
-               pmd = (pmd_t *)__va(pmd) + start_pmd;
+               pmd = pmd_offset(pud, vaddr);
                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
-
-                       /*
-                        * pg_table is physical at this point
-                        */
-
-                       pg_table = (pte_t *)pmd_address(*pmd);
-                       if (!pg_table) {
-                               pg_table = memblock_alloc(PAGE_SIZE,
-                                                         PAGE_SIZE);
+                       if (pmd_none(*pmd)) {
+                               pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                                if (!pg_table)
                                        panic("page table allocation failed\n");
-                               pg_table = (pte_t *) __pa(pg_table);
+                               pmd_populate_kernel(NULL, pmd, pg_table);
                        }
 
-                       pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-                       /* now change pg_table to kernel virtual addresses */
-
-                       pg_table = (pte_t *) __va(pg_table) + start_pte;
+                       pg_table = pte_offset_kernel(pmd, vaddr);
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                                pte_t pte;
                                pgprot_t prot;
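
The rewritten loop above walks the kernel page tables with the generic accessors instead of hand-computed physical addresses. A minimal sketch of that walk for a single kernel virtual address, assuming the standard Linux pgtable API (walk_kernel_pte is a hypothetical helper, not part of this patch):

	static pte_t *walk_kernel_pte(unsigned long vaddr)
	{
		pgd_t *pgd = pgd_offset_k(vaddr);	/* kernel (init_mm) top level */
		p4d_t *p4d = p4d_offset(pgd, vaddr);	/* folded away on 2/3-level configs */
		pud_t *pud = pud_offset(p4d, vaddr);
		pmd_t *pmd = pmd_offset(pud, vaddr);

		if (pmd_none(*pmd))
			return NULL;			/* no PTE page populated yet */
		return pte_offset_kernel(pmd, vaddr);
	}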
index c150a9d..497b7d0 100644 (file)
@@ -1,10 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 source "arch/powerpc/platforms/Kconfig.cputype"
 
-config PPC32
-       bool
-       default y if !PPC64
-
 config 32BIT
        bool
        default y if PPC32
@@ -133,7 +129,7 @@ config PPC
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_MEMBARRIER_CALLBACKS
        select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
-       select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
+       select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
        select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAS_UACCESS_FLUSHCACHE
        select ARCH_HAS_UACCESS_MCSAFE          if PPC64
@@ -173,6 +169,7 @@ config PPC
        select HAVE_ARCH_HUGE_VMAP              if PPC_BOOK3S_64 && PPC_RADIX_MMU
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN                  if PPC32
+       select HAVE_ARCH_KASAN_VMALLOC          if PPC32
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if COMPAT
@@ -222,9 +219,8 @@ config PPC
        select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
-       select HAVE_RCU_TABLE_FREE              if SMP
-       select HAVE_RCU_TABLE_NO_INVALIDATE     if HAVE_RCU_TABLE_FREE
-       select HAVE_MMU_GATHER_PAGE_SIZE
+       select MMU_GATHER_RCU_TABLE_FREE
+       select MMU_GATHER_PAGE_SIZE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE         if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
        select HAVE_SYSCALL_TRACEPOINTS
@@ -483,7 +479,7 @@ config MPROFILE_KERNEL
 config HOTPLUG_CPU
        bool "Support for enabling/disabling CPUs"
        depends on SMP && (PPC_PSERIES || \
-       PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
+               PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
        help
          Say Y here to be able to disable and re-enable individual
          CPUs at runtime on SMP machines.
index 4e1d398..0b06383 100644 (file)
@@ -371,7 +371,7 @@ config PPC_PTDUMP
 
 config PPC_DEBUG_WX
        bool "Warn on W+X mappings at boot"
-       depends on PPC_PTDUMP
+       depends on PPC_PTDUMP && STRICT_KERNEL_RWX
        help
          Generate a warning if any W+X mappings are found at boot.
 
index 134f12f..2268396 100644 (file)
@@ -17,11 +17,11 @@ quiet_cmd_head_check = CHKHEAD $@
 quiet_cmd_relocs_check = CHKREL  $@
 ifdef CONFIG_PPC_BOOK3S_64
       cmd_relocs_check =                                               \
-       $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" ; \
+       $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" ; \
        $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@"
 else
       cmd_relocs_check =                                               \
-       $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@"
+       $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"
 endif
 
 # `@true` prevents complaint when there is nothing to be done
index 1699e95..00c4d84 100644 (file)
@@ -228,7 +228,7 @@ void ibm4xx_denali_fixup_memsize(void)
                dpath = 8; /* 64 bits */
 
        /* get address pins (rows) */
-       val = SDRAM0_READ(DDR0_42);
+       val = SDRAM0_READ(DDR0_42);
 
        row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT);
        if (row > max_row)
index a2dd5f1..7de0689 100644 (file)
                                reg = <0x11a80 0x40 0x89fc 0x2>;
                                interrupts = <2 8>;
                                interrupt-parent = <&PIC>;
-                               gpios = < &cpm2_pio_d 19 0>;
+                               cs-gpios = < &cpm2_pio_d 19 0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
                                ds3106@1 {
index b625718..ecebc27 100644 (file)
                        reg = <0x4c0 0x40>;
                        interrupts = <2>;
                        interrupt-parent = <&qeic>;
-                       gpios = <&qe_pio_d 13 0>;
+                       cs-gpios = <&qe_pio_d 13 0>;
                        mode = "cpu-qe";
 
                        mmc-slot@0 {
index 1a8321a..33bbe58 100644 (file)
                        interrupts = <59 2>;
                        interrupt-parent = <&mpic>;
                        mode = "cpu";
-                       gpios = <&sdcsr_pio 7 0>;
+                       cs-gpios = <&sdcsr_pio 7 0>;
                        sleep = <&pmc 0x00000800 0>;
 
                        mmc-slot@0 {
index f0c8a07..7705a5c 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_BLK_DEV_SD=y
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_EXAR is not set
-# CONFIG_NET_VENDOR_HP is not set
 CONFIG_IBM_EMAC=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MELLANOX is not set
index ed02f12..22dc0da 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_AMIGA_PARTITION=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_EBONY is not set
 CONFIG_SAM440EP=y
 CONFIG_CMDLINE_BOOL=y
index fdb11da..789622f 100644 (file)
@@ -14,8 +14,6 @@ CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 CONFIG_PPC_MPC52xx=y
 CONFIG_PPC_MPC5200_SIMPLE=y
index 648c6b3..24bf1bd 100644 (file)
@@ -11,8 +11,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_MSDOS_PARTITION is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_83xx=y
index 510f7fd..f55e23c 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_PPC_ADDER875=y
 CONFIG_8xx_COPYBACK=y
 CONFIG_GEN_RTC=y
index 6e08d95..00d6996 100644 (file)
@@ -6,7 +6,6 @@ CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_82xx=y
index 9c1bf60..0e2e5e8 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_PPC_EP88XC=y
 CONFIG_8xx_COPYBACK=y
 CONFIG_GEN_RTC=y
index 6ce4f20..dcc8dcc 100644 (file)
@@ -12,7 +12,6 @@ CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_SLAB=y
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_82xx=y
 CONFIG_MGCOGE=y
index 1f3a045..e39346b 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 CONFIG_PPC_MPC512x=y
 CONFIG_MPC512x_LPBFIFO=y
index 0327a32..82a008c 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_8xx_COPYBACK=y
 CONFIG_GEN_RTC=y
 CONFIG_HZ_100=y
index 3284145..7174937 100644 (file)
@@ -181,7 +181,6 @@ CONFIG_MLX5_FPGA=y
 CONFIG_MLX5_CORE_EN=y
 CONFIG_MLX5_CORE_IPOIB=y
 CONFIG_MYRI10GE=m
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_USB_NET_DRIVERS=m
 # CONFIG_WLAN is not set
index b250e6f..7e68cb2 100644 (file)
@@ -189,7 +189,6 @@ CONFIG_MLX4_EN=m
 CONFIG_MYRI10GE=m
 CONFIG_S2IO=m
 CONFIG_PASEMI_MAC=y
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_SUNGEM=y
 CONFIG_GELIC_NET=m
index 7e28919..3e2f44f 100644 (file)
@@ -507,7 +507,6 @@ CONFIG_FORCEDETH=m
 CONFIG_HAMACHI=m
 CONFIG_YELLOWFIN=m
 CONFIG_QLA3XXX=m
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_8139CP=m
 CONFIG_8139TOO=m
index 26126b4..6b68109 100644 (file)
@@ -169,7 +169,6 @@ CONFIG_IXGBE=m
 CONFIG_I40E=m
 CONFIG_MLX4_EN=m
 CONFIG_MYRI10GE=m
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
index 069f67f..1b6bdad 100644 (file)
@@ -1,8 +1,3 @@
-CONFIG_PPC64=y
-CONFIG_ALTIVEC=y
-CONFIG_VSX=y
-CONFIG_NR_CPUS=2048
-CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_KERNEL_XZ=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -28,17 +23,15 @@ CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
 CONFIG_SLAB_FREELIST_HARDENED=y
-CONFIG_JUMP_LABEL=y
-CONFIG_STRICT_KERNEL_RWX=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_SIG=y
-CONFIG_MODULE_SIG_FORCE=y
-CONFIG_MODULE_SIG_SHA512=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MQ_IOSCHED_DEADLINE is not set
-# CONFIG_MQ_IOSCHED_KYBER is not set
+CONFIG_PPC64=y
+CONFIG_ALTIVEC=y
+CONFIG_VSX=y
+CONFIG_NR_CPUS=2048
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_PANIC_TIMEOUT=30
 # CONFIG_PPC_VAS is not set
 # CONFIG_PPC_PSERIES is not set
 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
@@ -46,17 +39,27 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_IDLE=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
 CONFIG_PRESERVE_FA_DUMP=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_NUMA=y
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
 CONFIG_PPC_64K_PAGES=y
 CONFIG_SCHED_SMT=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet"
 # CONFIG_SECCOMP is not set
 # CONFIG_PPC_MEM_KEYS is not set
+CONFIG_JUMP_LABEL=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -138,7 +141,6 @@ CONFIG_TIGON3=m
 CONFIG_BNX2X=m
 # CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CAVIUM is not set
 CONFIG_CHELSIO_T1=m
 # CONFIG_NET_VENDOR_CISCO is not set
@@ -147,7 +149,6 @@ CONFIG_CHELSIO_T1=m
 # CONFIG_NET_VENDOR_DLINK is not set
 CONFIG_BE2NET=m
 # CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_E1000=m
 CONFIG_E1000E=m
@@ -155,7 +156,6 @@ CONFIG_IGB=m
 CONFIG_IXGB=m
 CONFIG_IXGBE=m
 CONFIG_I40E=m
-CONFIG_S2IO=m
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 # CONFIG_MLX4_CORE_GEN2 is not set
@@ -166,12 +166,12 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MYRI10GE=m
 # CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_S2IO=m
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_PACKET_ENGINES is not set
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_QED=m
 CONFIG_QEDE=m
@@ -238,7 +238,6 @@ CONFIG_HID_CYPRESS=y
 CONFIG_HID_EZKEY=y
 CONFIG_HID_ITE=y
 CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MONTEREY=y
 CONFIG_USB_HIDDEV=y
@@ -275,6 +274,18 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
+CONFIG_ENCRYPTED_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY=y
+# CONFIG_INTEGRITY is not set
+CONFIG_LSM="yama,loadpin,safesetid,integrity"
+# CONFIG_CRYPTO_HW is not set
 CONFIG_CRC16=y
 CONFIG_CRC_ITU_T=y
 CONFIG_LIBCRC32C=y
@@ -285,17 +296,20 @@ CONFIG_LIBCRC32C=y
 # CONFIG_XZ_DEC_SPARC is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_PANIC_ON_OOPS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
 CONFIG_WQ_WATCHDOG=y
 # CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_DEBUG_CREDENTIALS=y
 # CONFIG_FTRACE is not set
-# CONFIG_RUNTIME_TESTING_MENU is not set
 CONFIG_XMON=y
-CONFIG_XMON_DEFAULT=y
-CONFIG_ENCRYPTED_KEYS=y
-# CONFIG_CRYPTO_ECHAINIV is not set
-# CONFIG_CRYPTO_HW is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
index 29b19ec..b964084 100644 (file)
@@ -77,5 +77,4 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
 CONFIG_CRC_T10DIF=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
index ffed2b4..eda8bfb 100644 (file)
@@ -14,7 +14,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_TQM8XX=y
 CONFIG_8xx_COPYBACK=y
 # CONFIG_8xx_CPU15 is not set
index f9dc597..3c0ba22 100644 (file)
@@ -102,41 +102,91 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
        isync();        /* Context sync required after mtsrin() */
 }
 
-static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+                                             u32 size, unsigned long dir)
 {
        u32 addr, end;
 
-       if (__builtin_constant_p(to) && to == NULL)
+       BUILD_BUG_ON(!__builtin_constant_p(dir));
+       BUILD_BUG_ON(dir == KUAP_CURRENT);
+
+       if (!(dir & KUAP_WRITE))
                return;
 
        addr = (__force u32)to;
 
-       if (!addr || addr >= TASK_SIZE || !size)
+       if (unlikely(addr >= TASK_SIZE || !size))
                return;
 
        end = min(addr + size, TASK_SIZE);
+
        current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
        kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);       /* Clear Ks */
 }
 
-static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void prevent_user_access(void __user *to, const void __user *from,
+                                               u32 size, unsigned long dir)
 {
-       u32 addr = (__force u32)to;
-       u32 end = min(addr + size, TASK_SIZE);
+       u32 addr, end;
+
+       BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+       if (dir == KUAP_CURRENT) {
+               u32 kuap = current->thread.kuap;
 
-       if (!addr || addr >= TASK_SIZE || !size)
+               if (unlikely(!kuap))
+                       return;
+
+               addr = kuap & 0xf0000000;
+               end = kuap << 28;
+       } else if (dir & KUAP_WRITE) {
+               addr = (__force u32)to;
+               end = min(addr + size, TASK_SIZE);
+
+               if (unlikely(addr >= TASK_SIZE || !size))
+                       return;
+       } else {
                return;
+       }
 
        current->thread.kuap = 0;
        kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);        /* set Ks */
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
+{
+       unsigned long flags = current->thread.kuap;
+       unsigned long addr = flags & 0xf0000000;
+       unsigned long end = flags << 28;
+       void __user *to = (__force void __user *)addr;
+
+       if (flags)
+               prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+
+       return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
 {
+       unsigned long addr = flags & 0xf0000000;
+       unsigned long end = flags << 28;
+       void __user *to = (__force void __user *)addr;
+
+       if (flags)
+               allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+       unsigned long begin = regs->kuap & 0xf0000000;
+       unsigned long end = regs->kuap << 28;
+
        if (!is_write)
                return false;
 
-       return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
+       return WARN(address < begin || address >= end,
+                   "Bug: write fault blocked by segment registers !");
 }
 
 #endif /* CONFIG_PPC_KUAP */
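
The direction-aware helpers above are meant to be paired around the actual user access. A minimal sketch of the expected calling pattern (illustrative only; to, from, size and ret are placeholders, and __copy_tofrom_user is the existing powerpc copy primitive):

	allow_user_access(to, from, size, KUAP_READ_WRITE);	/* open the user window */
	ret = __copy_tofrom_user(to, from, size);
	prevent_user_access(to, from, size, KUAP_READ_WRITE);	/* close it again */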
index 9983177..dc5c039 100644 (file)
@@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
 
 #define get_hugepd_cache_index(x)  (x)
 
-#ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
                                    void *table, int shift)
 {
@@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table)
 
        pgtable_free(table, shift);
 }
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-                                   void *table, int shift)
-{
-       pgtable_free(table, shift);
-}
-#endif
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
index 0796533..5b39c11 100644 (file)
@@ -193,7 +193,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 #endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END    _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END    ioremap_bot
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
index f254de9..90dd3a3 100644 (file)
  * because that would require an expensive read/modify write of the AMR.
  */
 
+static inline unsigned long get_kuap(void)
+{
+       if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+               return 0;
+
+       return mfspr(SPRN_AMR);
+}
+
 static inline void set_kuap(unsigned long value)
 {
        if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
@@ -77,25 +85,43 @@ static inline void set_kuap(unsigned long value)
        isync();
 }
 
-static inline void allow_user_access(void __user *to, const void __user *from,
-                                    unsigned long size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+                                             unsigned long size, unsigned long dir)
 {
        // This is written so we can resolve to a single case at build time
-       if (__builtin_constant_p(to) && to == NULL)
+       BUILD_BUG_ON(!__builtin_constant_p(dir));
+       if (dir == KUAP_READ)
                set_kuap(AMR_KUAP_BLOCK_WRITE);
-       else if (__builtin_constant_p(from) && from == NULL)
+       else if (dir == KUAP_WRITE)
                set_kuap(AMR_KUAP_BLOCK_READ);
-       else
+       else if (dir == KUAP_READ_WRITE)
                set_kuap(0);
+       else
+               BUILD_BUG();
 }
 
 static inline void prevent_user_access(void __user *to, const void __user *from,
-                                      unsigned long size)
+                                      unsigned long size, unsigned long dir)
+{
+       set_kuap(AMR_KUAP_BLOCKED);
+}
+
+static inline unsigned long prevent_user_access_return(void)
 {
+       unsigned long flags = get_kuap();
+
        set_kuap(AMR_KUAP_BLOCKED);
+
+       return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+       set_kuap(flags);
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
        return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
                    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
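
Because dir must be a compile-time constant (the BUILD_BUG_ON above enforces it), each call site collapses to a single AMR update rather than a read-modify-write. A rough illustration of what one call reduces to (assumed behaviour, not text from the patch):

	allow_user_access(to, NULL, size, KUAP_WRITE);
	/* effectively compiles down to: */
	set_kuap(AMR_KUAP_BLOCK_READ);	/* one mtspr of the AMR; reads remain blocked */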
index f6968c8..a41e91b 100644 (file)
@@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list;
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
 extern void __tlb_remove_table(void *_table);
-#endif
 void pte_frag_destroy(void *pte_frag);
 
 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
index b01624e..201a69e 100644 (file)
@@ -1355,18 +1355,21 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
  * Like pmd_huge() and pmd_large(), but works regardless of config options
  */
 #define pmd_is_leaf pmd_is_leaf
+#define pmd_leaf pmd_is_leaf
 static inline bool pmd_is_leaf(pmd_t pmd)
 {
        return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
 }
 
 #define pud_is_leaf pud_is_leaf
+#define pud_leaf pud_is_leaf
 static inline bool pud_is_leaf(pud_t pud)
 {
        return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
 }
 
 #define pgd_is_leaf pgd_is_leaf
+#define pgd_leaf pgd_is_leaf
 static inline bool pgd_is_leaf(pgd_t pgd)
 {
        return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
index cf00ff0..40a4d3c 100644 (file)
@@ -212,6 +212,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_P9_TLBIE_STQ_BUG       LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_P9_TIDR                        LONG_ASM_CONST(0x0000800000000000)
 #define CPU_FTR_P9_TLBIE_ERAT_BUG      LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_P9_RADIX_PREFETCH_BUG  LONG_ASM_CONST(0x0002000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -459,8 +460,10 @@ static inline void cpu_feature_keys_init(void) { }
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
            CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
            CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
-#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
-#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
+#define CPU_FTRS_POWER9_DD2_0 (CPU_FTRS_POWER9 | CPU_FTR_P9_RADIX_PREFETCH_BUG)
+#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | \
+                              CPU_FTR_P9_RADIX_PREFETCH_BUG | \
+                              CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
                               CPU_FTR_P9_TM_HV_ASSIST | \
                               CPU_FTR_P9_TM_XER_SO_BUG)
index b3e214a..ca33f4e 100644 (file)
@@ -33,7 +33,7 @@
 #define FW_FEATURE_LLAN                ASM_CONST(0x0000000000010000)
 #define FW_FEATURE_BULK_REMOVE ASM_CONST(0x0000000000020000)
 #define FW_FEATURE_XDABR       ASM_CONST(0x0000000000040000)
-#define FW_FEATURE_MULTITCE    ASM_CONST(0x0000000000080000)
+#define FW_FEATURE_PUT_TCE_IND ASM_CONST(0x0000000000080000)
 #define FW_FEATURE_SPLPAR      ASM_CONST(0x0000000000100000)
 #define FW_FEATURE_LPAR                ASM_CONST(0x0000000000400000)
 #define FW_FEATURE_PS3_LV1     ASM_CONST(0x0000000000800000)
@@ -51,6 +51,7 @@
 #define FW_FEATURE_BLOCK_REMOVE ASM_CONST(0x0000001000000000)
 #define FW_FEATURE_PAPR_SCM    ASM_CONST(0x0000002000000000)
 #define FW_FEATURE_ULTRAVISOR  ASM_CONST(0x0000004000000000)
+#define FW_FEATURE_STUFF_TCE   ASM_CONST(0x0000008000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -63,7 +64,8 @@ enum {
                FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
                FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
                FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
-               FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
+               FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE |
+               FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
                FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
                FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
                FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
index 27ac6f5..f2f8d8a 100644 (file)
@@ -34,7 +34,11 @@ struct arch_hw_breakpoint {
 #define HW_BRK_TYPE_PRIV_ALL   (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
                                 HW_BRK_TYPE_HYP)
 
+#ifdef CONFIG_PPC_8xx
+#define HW_BREAKPOINT_ALIGN 0x3
+#else
 #define HW_BREAKPOINT_ALIGN 0x7
+#endif
 
 #define DABR_MAX_LEN   8
 #define DAWR_MAX_LEN   512
index 296e51c..fbff9ff 100644 (file)
 void kasan_early_init(void);
 void kasan_mmu_init(void);
 void kasan_init(void);
+void kasan_late_init(void);
 #else
 static inline void kasan_init(void) { }
 static inline void kasan_mmu_init(void) { }
+static inline void kasan_late_init(void) { }
 #endif
 
 #endif /* __ASSEMBLY */
index 5b5e396..92bcd1a 100644 (file)
@@ -2,6 +2,16 @@
 #ifndef _ASM_POWERPC_KUP_H_
 #define _ASM_POWERPC_KUP_H_
 
+#define KUAP_READ      1
+#define KUAP_WRITE     2
+#define KUAP_READ_WRITE        (KUAP_READ | KUAP_WRITE)
+/*
+ * For prevent_user_access() only.
+ * Use the current saved situation instead of the to/from/size params.
+ * Used on book3s/32
+ */
+#define KUAP_CURRENT   4
+
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/kup-radix.h>
 #endif
@@ -42,32 +52,55 @@ void setup_kuap(bool disabled);
 #else
 static inline void setup_kuap(bool disabled) { }
 static inline void allow_user_access(void __user *to, const void __user *from,
-                                    unsigned long size) { }
+                                    unsigned long size, unsigned long dir) { }
 static inline void prevent_user_access(void __user *to, const void __user *from,
-                                      unsigned long size) { }
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; }
+                                      unsigned long size, unsigned long dir) { }
+static inline unsigned long prevent_user_access_return(void) { return 0UL; }
+static inline void restore_user_access(unsigned long flags) { }
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+       return false;
+}
 #endif /* CONFIG_PPC_KUAP */
 
 static inline void allow_read_from_user(const void __user *from, unsigned long size)
 {
-       allow_user_access(NULL, from, size);
+       allow_user_access(NULL, from, size, KUAP_READ);
 }
 
 static inline void allow_write_to_user(void __user *to, unsigned long size)
 {
-       allow_user_access(to, NULL, size);
+       allow_user_access(to, NULL, size, KUAP_WRITE);
+}
+
+static inline void allow_read_write_user(void __user *to, const void __user *from,
+                                        unsigned long size)
+{
+       allow_user_access(to, from, size, KUAP_READ_WRITE);
 }
 
 static inline void prevent_read_from_user(const void __user *from, unsigned long size)
 {
-       prevent_user_access(NULL, from, size);
+       prevent_user_access(NULL, from, size, KUAP_READ);
 }
 
 static inline void prevent_write_to_user(void __user *to, unsigned long size)
 {
-       prevent_user_access(to, NULL, size);
+       prevent_user_access(to, NULL, size, KUAP_WRITE);
+}
+
+static inline void prevent_read_write_user(void __user *to, const void __user *from,
+                                          unsigned long size)
+{
+       prevent_user_access(to, from, size, KUAP_READ_WRITE);
+}
+
+static inline void prevent_current_access_user(void)
+{
+       prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
 }
 
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _ASM_POWERPC_KUP_H_ */
+#endif /* _ASM_POWERPC_KUAP_H_ */
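
The prevent_user_access_return()/restore_user_access() pair added above gives callers a save-and-restore idiom for the user-access window. A minimal sketch of the intended usage (hypothetical surrounding code):

	unsigned long kuap_flags;

	kuap_flags = prevent_user_access_return();	/* close the window, remember prior state */
	/* ... work that must not touch user memory ... */
	restore_user_access(kuap_flags);		/* reopen it exactly as it was */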
index 1006a42..85ed239 100644 (file)
 #include <asm/reg.h>
 
 static inline void allow_user_access(void __user *to, const void __user *from,
-                                    unsigned long size)
+                                    unsigned long size, unsigned long dir)
 {
        mtspr(SPRN_MD_AP, MD_APG_INIT);
 }
 
 static inline void prevent_user_access(void __user *to, const void __user *from,
-                                      unsigned long size)
+                                      unsigned long size, unsigned long dir)
 {
        mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
+{
+       unsigned long flags = mfspr(SPRN_MD_AP);
+
+       mtspr(SPRN_MD_AP, MD_APG_KUAP);
+
+       return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+       mtspr(SPRN_MD_AP, flags);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
        return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
                    "Bug: fault blocked by AP register !");
index 552b96e..60c4d82 100644 (file)
@@ -114,7 +114,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 #endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END    _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END    ioremap_bot
+#endif
 
 /*
  * Bits in a linux-style PTE.  These match the bits in the
index 332b13b..29c4366 100644 (file)
@@ -46,7 +46,6 @@ static inline void pgtable_free(void *table, int shift)
 
 #define get_hugepd_cache_index(x)      (x)
 
-#ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
        unsigned long pgf = (unsigned long)table;
@@ -64,13 +63,6 @@ static inline void __tlb_remove_table(void *_table)
        pgtable_free(table, shift);
 }
 
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
-       pgtable_free(table, shift);
-}
-#endif
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
index 7f1fd41..8633208 100644 (file)
@@ -209,7 +209,7 @@ static inline bool pfn_valid(unsigned long pfn)
  */
 #if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
-#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
+#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
 #ifdef CONFIG_PPC64
 /*
index ea6ec65..69f4cb3 100644 (file)
@@ -223,12 +223,15 @@ struct pci_dn {
 extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
                                           int devfn);
 extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
-extern struct pci_dn *add_dev_pci_data(struct pci_dev *pdev);
-extern void remove_dev_pci_data(struct pci_dev *pdev);
 extern struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
                                               struct device_node *dn);
 extern void pci_remove_device_node_info(struct device_node *dn);
 
+#ifdef CONFIG_PCI_IOV
+struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev);
+void remove_sriov_vf_pdns(struct pci_dev *pdev);
+#endif
+
 static inline int pci_device_from_OF_node(struct device_node *np,
                                          u8 *bus, u8 *devfn)
 {
index 327567b..63ed7e3 100644 (file)
@@ -113,7 +113,6 @@ extern pgprot_t     pci_phys_mem_access_prot(struct file *file,
                                         pgprot_t prot);
 
 extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
-extern void pcibios_setup_bus_devices(struct pci_bus *bus);
 extern void pcibios_setup_bus_self(struct pci_bus *bus);
 extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
 extern void pcibios_scan_phb(struct pci_controller *hose);
index 0e4ec8c..8cc543e 100644 (file)
@@ -94,12 +94,6 @@ void mark_initmem_nx(void);
 static inline void mark_initmem_nx(void) { }
 #endif
 
-#ifdef CONFIG_PPC_DEBUG_WX
-void ptdump_check_wx(void);
-#else
-static inline void ptdump_check_wx(void) { }
-#endif
-
 /*
  * When used, PTE_FRAG_NR is defined in subarch pgtable.h
  * so we are sure it is included when arriving here.
index edcb1fc..d0ee0ed 100644 (file)
@@ -15,6 +15,7 @@
 #define PCI_SLOT_ID_PREFIX     (1UL << 63)
 #define PCI_SLOT_ID(phb_id, bdfn)      \
        (PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
+#define PCI_PHB_SLOT_ID(phb_id)                (phb_id)
 
 extern int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id);
 extern int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len);
index a9993e7..8387698 100644 (file)
@@ -162,6 +162,12 @@ struct thread_struct {
 #endif
 #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
        unsigned long   kuap;           /* opened segments for user access */
+#endif
+#ifdef CONFIG_VMAP_STACK
+       unsigned long   srr0;
+       unsigned long   srr1;
+       unsigned long   dar;
+       unsigned long   dsisr;
 #endif
        /* Debug Registers */
        struct debug_reg debug;
@@ -412,6 +418,9 @@ static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
 extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
 extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
+#ifdef CONFIG_PPC_970_NAP
+extern void power4_idle_nap(void);
+#endif
 
 extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
index 07df35e..299ee7b 100644 (file)
 #define SPRN_CMPE      152
 #define SPRN_CMPF      153
 #define SPRN_LCTRL1    156
+#define   LCTRL1_CTE_GT                0xc0000000
+#define   LCTRL1_CTF_LT                0x14000000
+#define   LCTRL1_CRWE_RW       0x00000000
+#define   LCTRL1_CRWE_RO       0x00040000
+#define   LCTRL1_CRWE_WO       0x000c0000
+#define   LCTRL1_CRWF_RW       0x00000000
+#define   LCTRL1_CRWF_RO       0x00010000
+#define   LCTRL1_CRWF_WO       0x00030000
 #define SPRN_LCTRL2    157
+#define   LCTRL2_LW0EN         0x80000000
+#define   LCTRL2_LW0LA_E       0x00000000
+#define   LCTRL2_LW0LA_F       0x04000000
+#define   LCTRL2_LW0LA_EandF   0x08000000
+#define   LCTRL2_LW0LADC       0x02000000
+#define   LCTRL2_SLW0EN                0x00000002
 #ifdef CONFIG_PPC_8xx
 #define SPRN_ICTRL     158
 #endif
index 8e1d019..a227074 100644 (file)
 #define _ASM_POWERPC_THREAD_INFO_H
 
 #include <asm/asm-const.h>
+#include <asm/page.h>
 
 #ifdef __KERNEL__
 
+#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
+#define THREAD_SHIFT           PAGE_SHIFT
+#else
 #define THREAD_SHIFT           CONFIG_THREAD_SHIFT
+#endif
 
 #define THREAD_SIZE            (1 << THREAD_SHIFT)
 
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN_SHIFT     (THREAD_SHIFT + 1)
+#else
+#define THREAD_ALIGN_SHIFT     THREAD_SHIFT
+#endif
+
+#define THREAD_ALIGN           (1 << THREAD_ALIGN_SHIFT)
+
 #ifndef __ASSEMBLY__
 #include <linux/cache.h>
 #include <asm/processor.h>
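
With VMAP'd stacks aligned to 2 * THREAD_SIZE as the comment above explains, overflow detection becomes a single bit test on the stack pointer. A hedged C sketch of that test (the real check lives in entry assembly; the helper name and the polarity of the result are illustrative):

	static inline bool stack_overflowed(unsigned long sp)
	{
		/* bit THREAD_SHIFT changes once sp leaves the THREAD_SIZE half
		 * of its 2 * THREAD_SIZE aligned region */
		return sp & (1UL << THREAD_SHIFT);
	}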
index b2c0be9..7f3a8b9 100644 (file)
 
 #define tlb_flush tlb_flush
 extern void tlb_flush(struct mmu_gather *tlb);
+/*
+ * book3s:
+ * Hash does not use the linux page-tables, so we can avoid
+ * the TLB invalidate for page-table freeing, Radix otoh does use the
+ * page-tables and needs the TLBI.
+ *
+ * nohash:
+ * We still do TLB invalidate in the __pte_free_tlb routine before we
+ * add the page table pages to mmu gather table batch.
+ */
+#define tlb_needs_table_invalidate()   radix_enabled()
 
 /* Get the generic bits... */
 #include <asm-generic/tlb.h>
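
The tlb_needs_table_invalidate() hook above lets generic mmu_gather code skip the TLB invalidate for page-table pages on hash, where the hardware never walks the Linux tables. Roughly how a caller might consult it (illustrative only, not the actual generic mm code):

	if (tlb_needs_table_invalidate())	/* true only when radix_enabled() */
		tlb_flush_mmu_tlbonly(tlb);	/* radix does walk the linux page tables */
	/* ...then hand the freed table pages to the batch either way */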
index c92fe7f..2f500de 100644 (file)
@@ -91,9 +91,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
 #define __get_user(x, ptr) \
-       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
-       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+
+#define __get_user_allowed(x, ptr) \
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
+#define __put_user_allowed(x, ptr) \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
 
 #define __get_user_inatomic(x, ptr) \
        __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -138,10 +143,9 @@ extern long __put_user_bad(void);
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */
 
-#define __put_user_size(x, ptr, size, retval)                  \
+#define __put_user_size_allowed(x, ptr, size, retval)          \
 do {                                                           \
        retval = 0;                                             \
-       allow_write_to_user(ptr, size);                         \
        switch (size) {                                         \
          case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
          case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
@@ -149,17 +153,26 @@ do {                                                              \
          case 8: __put_user_asm2(x, ptr, retval); break;       \
          default: __put_user_bad();                            \
        }                                                       \
+} while (0)
+
+#define __put_user_size(x, ptr, size, retval)                  \
+do {                                                           \
+       allow_write_to_user(ptr, size);                         \
+       __put_user_size_allowed(x, ptr, size, retval);          \
        prevent_write_to_user(ptr, size);                       \
 } while (0)
 
-#define __put_user_nocheck(x, ptr, size)                       \
+#define __put_user_nocheck(x, ptr, size, do_allow)                     \
 ({                                                             \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        if (!is_kernel_addr((unsigned long)__pu_addr))          \
                might_fault();                                  \
        __chk_user_ptr(ptr);                                    \
-       __put_user_size((x), __pu_addr, (size), __pu_err);      \
+       if (do_allow)                                                           \
+               __put_user_size((x), __pu_addr, (size), __pu_err);              \
+       else                                                                    \
+               __put_user_size_allowed((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                               \
 })
 
@@ -236,13 +249,12 @@ extern long __get_user_bad(void);
                : "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */
 
-#define __get_user_size(x, ptr, size, retval)                  \
+#define __get_user_size_allowed(x, ptr, size, retval)          \
 do {                                                           \
        retval = 0;                                             \
        __chk_user_ptr(ptr);                                    \
        if (size > sizeof(x))                                   \
                (x) = __get_user_bad();                         \
-       allow_read_from_user(ptr, size);                        \
        switch (size) {                                         \
        case 1: __get_user_asm(x, ptr, retval, "lbz"); break;   \
        case 2: __get_user_asm(x, ptr, retval, "lhz"); break;   \
@@ -250,6 +262,12 @@ do {                                                               \
        case 8: __get_user_asm2(x, ptr, retval);  break;        \
        default: (x) = __get_user_bad();                        \
        }                                                       \
+} while (0)
+
+#define __get_user_size(x, ptr, size, retval)                  \
+do {                                                           \
+       allow_read_from_user(ptr, size);                        \
+       __get_user_size_allowed(x, ptr, size, retval);          \
        prevent_read_from_user(ptr, size);                      \
 } while (0)
 
@@ -260,7 +278,7 @@ do {                                                                \
 #define __long_type(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
-#define __get_user_nocheck(x, ptr, size)                       \
+#define __get_user_nocheck(x, ptr, size, do_allow)                     \
 ({                                                             \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
@@ -269,7 +287,10 @@ do {                                                               \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
        barrier_nospec();                                       \
-       __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       if (do_allow)                                                           \
+               __get_user_size(__gu_val, __gu_addr, (size), __gu_err);         \
+       else                                                                    \
+               __get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
 })
@@ -313,9 +334,9 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
        unsigned long ret;
 
        barrier_nospec();
-       allow_user_access(to, from, n);
+       allow_read_write_user(to, from, n);
        ret = __copy_tofrom_user(to, from, n);
-       prevent_user_access(to, from, n);
+       prevent_read_write_user(to, from, n);
        return ret;
 }
 #endif /* __powerpc64__ */
@@ -356,33 +377,40 @@ static inline unsigned long raw_copy_from_user(void *to,
        return ret;
 }
 
-static inline unsigned long raw_copy_to_user(void __user *to,
-               const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
 {
-       unsigned long ret;
        if (__builtin_constant_p(n) && (n <= 8)) {
-               ret = 1;
+               unsigned long ret = 1;
 
                switch (n) {
                case 1:
-                       __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+                       __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
                        break;
                case 2:
-                       __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+                       __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
                        break;
                case 4:
-                       __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+                       __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
                        break;
                case 8:
-                       __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+                       __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }
 
+       return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       unsigned long ret;
+
        allow_write_to_user(to, n);
-       ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
+       ret = raw_copy_to_user_allowed(to, from, n);
        prevent_write_to_user(to, n);
        return ret;
 }
@@ -428,4 +456,22 @@ extern long __copy_from_user_flushcache(void *dst, const void __user *src,
 extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                           size_t len);
 
+static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+       if (unlikely(!access_ok(ptr, len)))
+               return false;
+       allow_read_write_user((void __user *)ptr, ptr, len);
+       return true;
+}
+#define user_access_begin      user_access_begin
+#define user_access_end                prevent_current_access_user
+#define user_access_save       prevent_user_access_return
+#define user_access_restore    restore_user_access
+
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_copy_to_user(d, s, l, e) \
+       unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+
 #endif /* _ARCH_POWERPC_UACCESS_H */
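
The user_access_begin()/unsafe_*() hooks defined above exist so callers can batch several user accesses inside one open/close of the user window. The generic pattern they are written for looks roughly like this (sketch; uptr, val and the efault label are placeholders):

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_put_user(val, uptr, efault);	/* no per-access KUAP toggling */
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;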
index 40f13f3..b9ef6cf 100644 (file)
@@ -108,16 +108,22 @@ struct vdso_data {
        __u32 stamp_sec_fraction;       /* fractional seconds of stamp_xtime */
        __u32 hrtimer_res;              /* hrtimer resolution */
        __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
-       __u32 dcache_block_size;        /* L1 d-cache block size     */
-       __u32 icache_block_size;        /* L1 i-cache block size     */
-       __u32 dcache_log_block_size;    /* L1 d-cache log block size */
-       __u32 icache_log_block_size;    /* L1 i-cache log block size */
 };
 
 #endif /* CONFIG_PPC64 */
 
 extern struct vdso_data *vdso_data;
 
+#else /* __ASSEMBLY__ */
+
+.macro get_datapage ptr, tmp
+       bcl     20, 31, .+4
+       mflr    \ptr
+       addi    \ptr, \ptr, (__kernel_datapage_offset - (.-4))@l
+       lwz     \tmp, 0(\ptr)
+       add     \ptr, \tmp, \ptr
+.endm
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index 24cdf97..93f982d 100644 (file)
@@ -87,56 +87,56 @@ extern bool __xive_enabled;
 
 static inline bool xive_enabled(void) { return __xive_enabled; }
 
-extern bool xive_spapr_init(void);
-extern bool xive_native_init(void);
-extern void xive_smp_probe(void);
-extern int  xive_smp_prepare_cpu(unsigned int cpu);
-extern void xive_smp_setup_cpu(void);
-extern void xive_smp_disable_cpu(void);
-extern void xive_teardown_cpu(void);
-extern void xive_shutdown(void);
-extern void xive_flush_interrupt(void);
+bool xive_spapr_init(void);
+bool xive_native_init(void);
+void xive_smp_probe(void);
+int  xive_smp_prepare_cpu(unsigned int cpu);
+void xive_smp_setup_cpu(void);
+void xive_smp_disable_cpu(void);
+void xive_teardown_cpu(void);
+void xive_shutdown(void);
+void xive_flush_interrupt(void);
 
 /* xmon hook */
-extern void xmon_xive_do_dump(int cpu);
-extern int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
+void xmon_xive_do_dump(int cpu);
+int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
 
 /* APIs used by KVM */
-extern u32 xive_native_default_eq_shift(void);
-extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
-extern void xive_native_free_vp_block(u32 vp_base);
-extern int xive_native_populate_irq_data(u32 hw_irq,
-                                        struct xive_irq_data *data);
-extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
-extern u32 xive_native_alloc_irq(void);
-extern void xive_native_free_irq(u32 irq);
-extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
-
-extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
-                                      __be32 *qpage, u32 order, bool can_escalate);
-extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
-
-extern void xive_native_sync_source(u32 hw_irq);
-extern void xive_native_sync_queue(u32 hw_irq);
-extern bool is_xive_irq(struct irq_chip *chip);
-extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
-extern int xive_native_disable_vp(u32 vp_id);
-extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
-extern bool xive_native_has_single_escalation(void);
-
-extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
-                                     u64 *out_qpage,
-                                     u64 *out_qsize,
-                                     u64 *out_qeoi_page,
-                                     u32 *out_escalate_irq,
-                                     u64 *out_qflags);
-
-extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
-                                      u32 *qindex);
-extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
-                                      u32 qindex);
-extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
-extern bool xive_native_has_queue_state_support(void);
+u32 xive_native_default_eq_shift(void);
+u32 xive_native_alloc_vp_block(u32 max_vcpus);
+void xive_native_free_vp_block(u32 vp_base);
+int xive_native_populate_irq_data(u32 hw_irq,
+                                 struct xive_irq_data *data);
+void xive_cleanup_irq_data(struct xive_irq_data *xd);
+u32 xive_native_alloc_irq(void);
+void xive_native_free_irq(u32 irq);
+int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+
+int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+                               __be32 *qpage, u32 order, bool can_escalate);
+void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+
+void xive_native_sync_source(u32 hw_irq);
+void xive_native_sync_queue(u32 hw_irq);
+bool is_xive_irq(struct irq_chip *chip);
+int xive_native_enable_vp(u32 vp_id, bool single_escalation);
+int xive_native_disable_vp(u32 vp_id);
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+bool xive_native_has_single_escalation(void);
+
+int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
+                              u64 *out_qpage,
+                              u64 *out_qsize,
+                              u64 *out_qeoi_page,
+                              u32 *out_escalate_irq,
+                              u64 *out_qflags);
+
+int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
+                               u32 *qindex);
+int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
+                               u32 qindex);
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+bool xive_native_has_queue_state_support(void);
 
 #else
 
index 157b014..78a1b22 100644 (file)
@@ -62,8 +62,7 @@ obj-$(CONFIG_PPC_BOOK3E_64)   += exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
 obj-$(CONFIG_PPC64)            += vdso64/
 obj-$(CONFIG_ALTIVEC)          += vecemu.o
-obj-$(CONFIG_PPC_970_NAP)      += idle_power4.o
-obj-$(CONFIG_PPC_P7_NAP)       += idle_book3s.o
+obj-$(CONFIG_PPC_BOOK3S_IDLE)  += idle_book3s.o
 procfs-y                       := proc_powerpc.o
 obj-$(CONFIG_PROC_FS)          += $(procfs-y)
 rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI)  := rtas_pci.o
index 3d47aec..c25e562 100644 (file)
@@ -127,6 +127,12 @@ int main(void)
        OFFSET(KSP_VSID, thread_struct, ksp_vsid);
 #else /* CONFIG_PPC64 */
        OFFSET(PGDIR, thread_struct, pgdir);
+#ifdef CONFIG_VMAP_STACK
+       OFFSET(SRR0, thread_struct, srr0);
+       OFFSET(SRR1, thread_struct, srr1);
+       OFFSET(DAR, thread_struct, dar);
+       OFFSET(DSISR, thread_struct, dsisr);
+#endif
 #ifdef CONFIG_SPE
        OFFSET(THREAD_EVR0, thread_struct, evr[0]);
        OFFSET(THREAD_ACC, thread_struct, acc);
@@ -389,11 +395,11 @@ int main(void)
        OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec);
        OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
        OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res);
+#ifdef CONFIG_PPC64
        OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
        OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
        OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size);
        OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size);
-#ifdef CONFIG_PPC64
        OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
        OFFSET(TVAL64_TV_SEC, __kernel_old_timeval, tv_sec);
        OFFSET(TVAL64_TV_USEC, __kernel_old_timeval, tv_usec);
@@ -413,7 +419,10 @@ int main(void)
        DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
        DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
        DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
+       DEFINE(CLOCK_MAX, CLOCK_TAI);
        DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
+       DEFINE(EINVAL, EINVAL);
+       DEFINE(KTIME_LOW_RES, KTIME_LOW_RES);
 
 #ifdef CONFIG_BUG
        DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
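Context for the new DEFINE() entries above (CLOCK_MAX, EINVAL, KTIME_LOW_RES): asm-offsets.c is compiled only to generate include/generated/asm-offsets.h, whose plain #defines let the 32-bit VDSO assembly compare clock IDs and return -EINVAL symbolically instead of hard-coding numbers. A rough sketch of the generated output; the values are illustrative and config-dependent, not taken from this patch:

    /* include/generated/asm-offsets.h (excerpt, illustrative values) */
    #define CLOCK_MAX 11             /* from DEFINE(CLOCK_MAX, CLOCK_TAI) */
    #define NSEC_PER_SEC 1000000000
    #define EINVAL 22
    #define KTIME_LOW_RES 10000000   /* TICK_NSEC; depends on CONFIG_HZ */
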
index 180b3a5..182b404 100644 (file)
@@ -727,17 +727,20 @@ static __init void cpufeatures_cpu_quirks(void)
        /*
         * Not all quirks can be derived from the cpufeatures device tree.
         */
-       if ((version & 0xffffefff) == 0x004e0200)
-               ; /* DD2.0 has no feature flag */
-       else if ((version & 0xffffefff) == 0x004e0201)
+       if ((version & 0xffffefff) == 0x004e0200) {
+               /* DD2.0 has no feature flag */
+               cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+       } else if ((version & 0xffffefff) == 0x004e0201) {
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-       else if ((version & 0xffffefff) == 0x004e0202) {
+               cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+       } else if ((version & 0xffffefff) == 0x004e0202) {
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-       } else if ((version & 0xffff0000) == 0x004e0000)
+       } else if ((version & 0xffff0000) == 0x004e0000) {
                /* DD2.1 and up have DD2_1 */
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+       }
 
        if ((version & 0xffff0000) == 0x004e0000) {
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
index bc8a551..17cb3e9 100644 (file)
@@ -503,7 +503,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
        rc = 1;
        if (pe->state & EEH_PE_ISOLATED) {
                pe->check_count++;
-               if (pe->check_count % EEH_MAX_FAILS == 0) {
+               if (pe->check_count == EEH_MAX_FAILS) {
                        dn = pci_device_to_OF_node(dev);
                        if (dn)
                                location = of_get_property(dn, "ibm,loc-code",
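The eeh_dev_check_failure() hunk above also changes when the warning fires for an already-isolated PE: the old modulo test warned on every EEH_MAX_FAILS-th check, while the new equality test warns exactly once, when the threshold is first reached. A standalone sketch of just that difference; N is a stand-in for EEH_MAX_FAILS, whose real value is not shown in this hunk:

    #define N 5	/* stand-in for EEH_MAX_FAILS, illustrative value only */

    static int warn_old(unsigned int check_count) { return check_count % N == 0; } /* fires at N, 2N, 3N, ... */
    static int warn_new(unsigned int check_count) { return check_count == N; }     /* fires once, at N */
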
@@ -1191,7 +1191,6 @@ void eeh_add_device_late(struct pci_dev *dev)
                eeh_rmv_from_parent_pe(edev);
                eeh_addr_cache_rmv_dev(edev->pdev);
                eeh_sysfs_remove_device(edev->pdev);
-               edev->mode &= ~EEH_DEV_SYSFS;
 
                /*
                 * We definitely should have the PCI device removed
@@ -1296,17 +1295,11 @@ void eeh_remove_device(struct pci_dev *dev)
        edev->pdev = NULL;
 
        /*
-        * The flag "in_error" is used to trace EEH devices for VFs
-        * in error state or not. It's set in eeh_report_error(). If
-        * it's not set, eeh_report_{reset,resume}() won't be called
-        * for the VF EEH device.
+        * eeh_sysfs_remove_device() uses pci_dev_to_eeh_dev() so we need to
+        * remove the sysfs files before clearing dev.archdata.edev
         */
-       edev->in_error = false;
-       dev->dev.archdata.edev = NULL;
-       if (!(edev->pe->state & EEH_PE_KEEP))
-               eeh_rmv_from_parent_pe(edev);
-       else
-               edev->mode |= EEH_DEV_DISCONNECTED;
+       if (edev->mode & EEH_DEV_SYSFS)
+               eeh_sysfs_remove_device(dev);
 
        /*
         * We're removing from the PCI subsystem, that means
@@ -1317,8 +1310,19 @@ void eeh_remove_device(struct pci_dev *dev)
        edev->mode |= EEH_DEV_NO_HANDLER;
 
        eeh_addr_cache_rmv_dev(dev);
-       eeh_sysfs_remove_device(dev);
-       edev->mode &= ~EEH_DEV_SYSFS;
+
+       /*
+        * The flag "in_error" is used to trace EEH devices for VFs
+        * in error state or not. It's set in eeh_report_error(). If
+        * it's not set, eeh_report_{reset,resume}() won't be called
+        * for the VF EEH device.
+        */
+       edev->in_error = false;
+       dev->dev.archdata.edev = NULL;
+       if (!(edev->pe->state & EEH_PE_KEEP))
+               eeh_rmv_from_parent_pe(edev);
+       else
+               edev->mode |= EEH_DEV_DISCONNECTED;
 }
 
 int eeh_unfreeze_pe(struct eeh_pe *pe)
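The reordering in eeh_remove_device() above matters because the sysfs teardown looks the eeh_dev up through the pci_dev, so it has to run before dev.archdata.edev is cleared. Roughly the lookup it depends on (shown for context only, not part of this patch; see arch/powerpc/include/asm/eeh.h):

    static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
    {
    	return pdev->dev.archdata.edev;	/* NULL once eeh_remove_device() has cleared it */
    }
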
index cf11277..6b50bf1 100644 (file)
@@ -159,18 +159,10 @@ eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
 
 static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
 {
-       struct pci_dn *pdn;
        struct eeh_dev *edev;
        int i;
 
-       pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
-       if (!pdn) {
-               pr_warn("PCI: no pci dn found for dev=%s\n",
-                       pci_name(dev));
-               return;
-       }
-
-       edev = pdn_to_eeh_dev(pdn);
+       edev = pci_dev_to_eeh_dev(dev);
        if (!edev) {
                pr_warn("PCI: no EEH dev found for %s\n",
                        pci_name(dev));
index 3dd1a42..a1eaffe 100644 (file)
@@ -525,12 +525,6 @@ static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
 
                pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
                edev->pdev = NULL;
-
-               /*
-                * We have to set the VF PE number to invalid one, which is
-                * required to plug the VF successfully.
-                */
-               pdn->pe_number = IODA_INVALID_PE;
 #endif
                if (rmv_data)
                        list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
index ab44d96..4fb0f1e 100644 (file)
@@ -14,7 +14,7 @@
 /**
  * EEH_SHOW_ATTR -- Create sysfs entry for eeh statistic
  * @_name: name of file in sysfs directory
- * @_memb: name of member in struct pci_dn to access
+ * @_memb: name of member in struct eeh_dev to access
  * @_format: printf format for display
  *
  * All of the attributes look very similar, so just
@@ -75,7 +75,7 @@ static ssize_t eeh_pe_state_store(struct device *dev,
 
 static DEVICE_ATTR_RW(eeh_pe_state);
 
-#ifdef CONFIG_PCI_IOV
+#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PPC_PSERIES)
 static ssize_t eeh_notify_resume_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
 {
@@ -86,7 +86,6 @@ static ssize_t eeh_notify_resume_show(struct device *dev,
        if (!edev || !edev->pe)
                return -ENODEV;
 
-       pdn = pci_get_pdn(pdev);
        return sprintf(buf, "%d\n", pdn->last_allow_rc);
 }
 
@@ -132,7 +131,7 @@ static void eeh_notify_resume_remove(struct pci_dev *pdev)
 #else
 static inline int eeh_notify_resume_add(struct pci_dev *pdev) { return 0; }
 static inline void eeh_notify_resume_remove(struct pci_dev *pdev) { }
-#endif /* CONFIG_PCI_IOV */
+#endif /* CONFIG_PCI_IOV && CONFIG_PPC_PSERIES */
 
 void eeh_sysfs_add_device(struct pci_dev *pdev)
 {
@@ -160,22 +159,23 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
 {
        struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
 
+       if (!edev) {
+               WARN_ON(eeh_enabled());
+               return;
+       }
+
+       edev->mode &= ~EEH_DEV_SYSFS;
+
        /*
         * The parent directory might have been removed. We needn't
         * continue for that case.
         */
-       if (!pdev->dev.kobj.sd) {
-               if (edev)
-                       edev->mode &= ~EEH_DEV_SYSFS;
+       if (!pdev->dev.kobj.sd)
                return;
-       }
 
        device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
        device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
        device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state);
 
        eeh_notify_resume_remove(pdev);
-
-       if (edev)
-               edev->mode &= ~EEH_DEV_SYSFS;
 }
index e1a4c39..77abbc3 100644 (file)
@@ -140,6 +140,7 @@ transfer_to_handler:
        stw     r12,_CTR(r11)
        stw     r2,_XER(r11)
        mfspr   r12,SPRN_SPRG_THREAD
+       tovirt_vmstack r12, r12
        beq     2f                      /* if from user, fix up THREAD.regs */
        addi    r2, r12, -THREAD
        addi    r11,r1,STACK_FRAME_OVERHEAD
@@ -179,11 +180,13 @@ transfer_to_handler:
 2:     /* if from kernel, check interrupted DOZE/NAP mode and
          * check for stack overflow
          */
-       kuap_save_and_lock r11, r12, r9, r2, r0
+       kuap_save_and_lock r11, r12, r9, r2, r6
        addi    r2, r12, -THREAD
+#ifndef CONFIG_VMAP_STACK
        lwz     r9,KSP_LIMIT(r12)
        cmplw   r1,r9                   /* if r1 <= ksp_limit */
        ble-    stack_ovf               /* then the kernel stack overflowed */
+#endif
 5:
 #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
        lwz     r12,TI_LOCAL_FLAGS(r2)
@@ -195,7 +198,8 @@ transfer_to_handler:
 transfer_to_handler_cont:
 3:
        mflr    r9
-       tovirt(r2, r2)                  /* set r2 to current */
+       tovirt_novmstack r2, r2         /* set r2 to current */
+       tovirt_vmstack r9, r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
@@ -284,9 +288,11 @@ reenable_mmu:
        rlwinm  r9,r9,0,~MSR_EE
        lwz     r12,_LINK(r11)          /* and return to address in LR */
        kuap_restore r11, r2, r3, r4, r5
+       lwz     r2, GPR2(r11)
        b       fast_exception_return
 #endif
 
+#ifndef CONFIG_VMAP_STACK
 /*
  * On kernel stack overflow, load up an initial stack pointer
  * and call StackOverflow(regs), which should not return.
@@ -312,6 +318,7 @@ stack_ovf:
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI
+#endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 trace_syscall_entry_irq_off:
@@ -397,7 +404,7 @@ ret_from_syscall:
        LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)      /* doesn't include MSR_EE */
        /* Note: We don't bother telling lockdep about it */
        SYNC
-       MTMSRD(r10)
+       mtmsr   r10
        lwz     r9,TI_FLAGS(r2)
        li      r8,-MAX_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
@@ -554,7 +561,7 @@ syscall_exit_work:
         */
        ori     r10,r10,MSR_EE
        SYNC
-       MTMSRD(r10)
+       mtmsr   r10
 
        /* Save NVGPRS if they're not saved already */
        lwz     r4,_TRAP(r1)
@@ -621,7 +628,6 @@ ppc_swapcontext:
  */
        .globl  handle_page_fault
 handle_page_fault:
-       stw     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_PPC_BOOK3S_32
        andis.  r0,r5,DSISR_DABRMATCH@h
@@ -697,7 +703,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
        and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
        beq+    1f
        andc    r11,r11,r0
-       MTMSRD(r11)
+       mtmsr   r11
        isync
 1:     stw     r11,_MSR(r1)
        mfcr    r10
@@ -831,7 +837,7 @@ ret_from_except:
        /* Note: We don't bother telling lockdep about it */
        LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
        SYNC                    /* Some chip revs have problems here... */
-       MTMSRD(r10)             /* disable interrupts */
+       mtmsr   r10             /* disable interrupts */
 
        lwz     r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r0,r3,MSR_PR
@@ -998,7 +1004,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
         */
        LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
        SYNC
-       MTMSRD(r10)             /* clear the RI bit */
+       mtmsr   r10             /* clear the RI bit */
        .globl exc_exit_restart
 exc_exit_restart:
        lwz     r12,_NIP(r1)
@@ -1234,7 +1240,7 @@ do_resched:                       /* r10 contains MSR_KERNEL here */
 #endif
        ori     r10,r10,MSR_EE
        SYNC
-       MTMSRD(r10)             /* hard-enable interrupts */
+       mtmsr   r10             /* hard-enable interrupts */
        bl      schedule
 recheck:
        /* Note: And we don't tell it we are disabling them again
@@ -1243,7 +1249,7 @@ recheck:
         */
        LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
        SYNC
-       MTMSRD(r10)             /* disable interrupts */
+       mtmsr   r10             /* disable interrupts */
        lwz     r9,TI_FLAGS(r2)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
@@ -1252,7 +1258,7 @@ recheck:
 do_user_signal:                        /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
-       MTMSRD(r10)             /* hard-enable interrupts */
+       mtmsr   r10             /* hard-enable interrupts */
        /* save r13-r31 in the exception frame, if not already done */
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
@@ -1334,14 +1340,14 @@ _GLOBAL(enter_rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
-       tophys(r7,r1)
+       tophys_novmstack r7, r1
        lwz     r8,RTASENTRY(r4)
        lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
        LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
        SYNC                    /* disable interrupts so SRR0/1 */
-       MTMSRD(r0)              /* don't get trashed */
+       mtmsr   r0              /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
        mtlr    r6
        stw     r7, THREAD + RTAS_SP(r2)
index a9a1d3c..6ba675b 100644 (file)
@@ -597,8 +597,7 @@ _GLOBAL(_switch)
        std     r0,16(r1)
        stdu    r1,-SWITCH_FRAME_SIZE(r1)
        /* r3-r13 are caller saved -- Cort */
-       SAVE_8GPRS(14, r1)
-       SAVE_10GPRS(22, r1)
+       SAVE_NVGPRS(r1)
        std     r0,_NIP(r1)     /* Return to switch caller */
        mfcr    r23
        std     r23,_CCR(r1)
@@ -722,8 +721,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        mtcrf   0xFF,r6
 
        /* r3-r13 are destroyed -- Cort */
-       REST_8GPRS(14, r1)
-       REST_10GPRS(22, r1)
+       REST_NVGPRS(r1)
 
        /* convert old thread to its task_struct for return value */
        addi    r3,r3,-THREAD
@@ -1155,8 +1153,7 @@ _GLOBAL(enter_rtas)
         */
        SAVE_GPR(2, r1)                 /* Save the TOC */
        SAVE_GPR(13, r1)                /* Save paca */
-       SAVE_8GPRS(14, r1)              /* Save the non-volatiles */
-       SAVE_10GPRS(22, r1)             /* ditto */
+       SAVE_NVGPRS(r1)                 /* Save the non-volatiles */
 
        mfcr    r4
        std     r4,_CCR(r1)
@@ -1263,8 +1260,7 @@ rtas_restore_regs:
        /* relocation is on at this point */
        REST_GPR(2, r1)                 /* Restore the TOC */
        REST_GPR(13, r1)                /* Restore paca */
-       REST_8GPRS(14, r1)              /* Restore the non-volatiles */
-       REST_10GPRS(22, r1)             /* ditto */
+       REST_NVGPRS(r1)                 /* Restore the non-volatiles */
 
        GET_PACA(r13)
 
@@ -1298,8 +1294,7 @@ _GLOBAL(enter_prom)
         */
        SAVE_GPR(2, r1)
        SAVE_GPR(13, r1)
-       SAVE_8GPRS(14, r1)
-       SAVE_10GPRS(22, r1)
+       SAVE_NVGPRS(r1)
        mfcr    r10
        mfmsr   r11
        std     r10,_CCR(r1)
@@ -1343,8 +1338,7 @@ _GLOBAL(enter_prom)
        /* Restore other registers */
        REST_GPR(2, r1)
        REST_GPR(13, r1)
-       REST_8GPRS(14, r1)
-       REST_10GPRS(22, r1)
+       REST_NVGPRS(r1)
        ld      r4,_CCR(r1)
        mtcr    r4
 
index 46508b1..ffc15f4 100644 (file)
@@ -1408,22 +1408,9 @@ EXC_VIRT_NONE(0x4b00, 0x100)
  *
  * Call convention:
  *
- * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
- *
- * For hypercalls, the register convention is as follows:
- * r0 volatile
- * r1-2 nonvolatile
- * r3 volatile parameter and return value for status
- * r4-r10 volatile input and output value
- * r11 volatile hypercall number and output value
- * r12 volatile input and output value
- * r13-r31 nonvolatile
- * LR nonvolatile
- * CTR volatile
- * XER volatile
- * CR0-1 CR5-7 volatile
- * CR2-4 nonvolatile
- * Other registers nonvolatile
+ * syscall and hypercalls register conventions are documented in
+ * Documentation/powerpc/syscall64-abi.rst and
+ * Documentation/powerpc/papr_hcalls.rst respectively.
  *
  * The intersection of volatile registers that don't contain possible
  * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
@@ -2208,11 +2195,20 @@ __end_interrupts:
 DEFINE_FIXED_SYMBOL(__end_interrupts)
 
 #ifdef CONFIG_PPC_970_NAP
+       /*
+	 * Called by the exception entry code if _TLF_NAPPING was set; this
+	 * clears the NAPPING flag and redirects the exception exit to
+	 * power4_idle_nap_return.
+        */
+       .globl power4_fixup_nap
 EXC_COMMON_BEGIN(power4_fixup_nap)
        andc    r9,r9,r10
        std     r9,TI_LOCAL_FLAGS(r11)
-       ld      r10,_LINK(r1)           /* make idle task do the */
-       std     r10,_NIP(r1)            /* equivalent of a blr */
+       LOAD_REG_ADDR(r10, power4_idle_nap_return)
+       std     r10,_NIP(r1)
+       blr
+
+power4_idle_nap_return:
        blr
 #endif
 
index 0bb991d..3235a8d 100644 (file)
@@ -94,6 +94,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        /* enable use of FP after return */
 #ifdef CONFIG_PPC32
        mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
+#ifdef CONFIG_VMAP_STACK
+       tovirt(r5, r5)
+#endif
        lwz     r4,THREAD_FPEXC_MODE(r5)
        ori     r9,r9,MSR_FP            /* enable FP for current */
        or      r9,r9,r4
index 4a24f8f..0493fca 100644 (file)
@@ -272,16 +272,21 @@ __secondary_hold_acknowledge:
  */
        . = 0x200
        DO_KVM  0x200
-       mtspr   SPRN_SPRG_SCRATCH0,r10
-       mtspr   SPRN_SPRG_SCRATCH1,r11
-       mfcr    r10
+MachineCheck:
+       EXCEPTION_PROLOG_0
+#ifdef CONFIG_VMAP_STACK
+       li      r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+       mtmsr   r11
+       isync
+#endif
 #ifdef CONFIG_PPC_CHRP
        mfspr   r11, SPRN_SPRG_THREAD
+       tovirt_vmstack r11, r11
        lwz     r11, RTAS_SP(r11)
        cmpwi   cr1, r11, 0
        bne     cr1, 7f
 #endif /* CONFIG_PPC_CHRP */
-       EXCEPTION_PROLOG_1
+       EXCEPTION_PROLOG_1 for_rtas=1
 7:     EXCEPTION_PROLOG_2
        addi    r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_PPC_CHRP
@@ -296,24 +301,21 @@ __secondary_hold_acknowledge:
        . = 0x300
        DO_KVM  0x300
 DataAccess:
-       EXCEPTION_PROLOG
-       mfspr   r10,SPRN_DSISR
-       stw     r10,_DSISR(r11)
+       EXCEPTION_PROLOG handle_dar_dsisr=1
+       get_and_save_dar_dsisr_on_stack r4, r5, r11
+BEGIN_MMU_FTR_SECTION
 #ifdef CONFIG_PPC_KUAP
-       andis.  r0,r10,(DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
+       andis.  r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
 #else
-       andis.  r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h
+       andis.  r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
 #endif
-       bne     1f                      /* if not, try to put a PTE */
-       mfspr   r4,SPRN_DAR             /* into the hash table */
-       rlwinm  r3,r10,32-15,21,21      /* DSISR_STORE -> _PAGE_RW */
-BEGIN_MMU_FTR_SECTION
+       bne     handle_page_fault_tramp_2       /* if not, try to put a PTE */
+       rlwinm  r3, r5, 32 - 15, 21, 21         /* DSISR_STORE -> _PAGE_RW */
        bl      hash_page
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-1:     lwz     r5,_DSISR(r11)          /* get DSISR value */
-       mfspr   r4,SPRN_DAR
-       EXC_XFER_LITE(0x300, handle_page_fault)
-
+       b       handle_page_fault_tramp_1
+FTR_SECTION_ELSE
+       b       handle_page_fault_tramp_2
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
 
 /* Instruction access exception. */
        . = 0x400
@@ -329,6 +331,7 @@ BEGIN_MMU_FTR_SECTION
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
 1:     mr      r4,r12
        andis.  r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
+       stw     r4, _DAR(r11)
        EXC_XFER_LITE(0x400, handle_page_fault)
 
 /* External interrupt */
@@ -338,11 +341,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
        . = 0x600
        DO_KVM  0x600
 Alignment:
-       EXCEPTION_PROLOG
-       mfspr   r4,SPRN_DAR
-       stw     r4,_DAR(r11)
-       mfspr   r5,SPRN_DSISR
-       stw     r5,_DSISR(r11)
+       EXCEPTION_PROLOG handle_dar_dsisr=1
+       save_dar_dsisr_on_stack r4, r5, r11
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_STD(0x600, alignment_exception)
 
@@ -645,6 +645,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
 
        . = 0x3000
 
+handle_page_fault_tramp_1:
+       lwz     r4, _DAR(r11)
+       lwz     r5, _DSISR(r11)
+       /* fall through */
+handle_page_fault_tramp_2:
+       EXC_XFER_LITE(0x300, handle_page_fault)
+
+stack_overflow:
+       vmap_stack_overflow_exception
+
 AltiVecUnavailable:
        EXCEPTION_PROLOG
 #ifdef CONFIG_ALTIVEC
@@ -917,6 +927,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
        ori     r4,r4,2f@l
        tophys(r4,r4)
        li      r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+
+       .align  4
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r3
        SYNC
@@ -1058,6 +1070,8 @@ _ENTRY(update_bats)
        rlwinm  r0, r6, 0, ~MSR_RI
        rlwinm  r0, r0, 0, ~MSR_EE
        mtmsr   r0
+
+       .align  4
        mtspr   SPRN_SRR0, r4
        mtspr   SPRN_SRR1, r3
        SYNC
@@ -1097,6 +1111,8 @@ mmu_off:
        andi.   r0,r3,MSR_DR|MSR_IR             /* MMU enabled? */
        beqlr
        andc    r3,r3,r0
+
+       .align  4
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r3
        sync
index 8abc778..a6a5fbb 100644 (file)
  * We assume sprg3 has the physical address of the current
  * task's thread_struct.
  */
+.macro EXCEPTION_PROLOG handle_dar_dsisr=0
+       EXCEPTION_PROLOG_0      handle_dar_dsisr=\handle_dar_dsisr
+       EXCEPTION_PROLOG_1
+       EXCEPTION_PROLOG_2      handle_dar_dsisr=\handle_dar_dsisr
+.endm
 
-.macro EXCEPTION_PROLOG
+.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
        mtspr   SPRN_SPRG_SCRATCH0,r10
        mtspr   SPRN_SPRG_SCRATCH1,r11
+#ifdef CONFIG_VMAP_STACK
+       mfspr   r10, SPRN_SPRG_THREAD
+       .if     \handle_dar_dsisr
+       mfspr   r11, SPRN_DAR
+       stw     r11, DAR(r10)
+       mfspr   r11, SPRN_DSISR
+       stw     r11, DSISR(r10)
+       .endif
+       mfspr   r11, SPRN_SRR0
+       stw     r11, SRR0(r10)
+#endif
+       mfspr   r11, SPRN_SRR1          /* check whether user or kernel */
+#ifdef CONFIG_VMAP_STACK
+       stw     r11, SRR1(r10)
+#endif
        mfcr    r10
-       EXCEPTION_PROLOG_1
-       EXCEPTION_PROLOG_2
+       andi.   r11, r11, MSR_PR
 .endm
 
-.macro EXCEPTION_PROLOG_1
-       mfspr   r11,SPRN_SRR1           /* check whether user or kernel */
-       andi.   r11,r11,MSR_PR
+.macro EXCEPTION_PROLOG_1 for_rtas=0
+#ifdef CONFIG_VMAP_STACK
+       .ifeq   \for_rtas
+       li      r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+       mtmsr   r11
+       isync
+       .endif
+       subi    r11, r1, INT_FRAME_SIZE         /* use r1 if kernel */
+#else
        tophys(r11,r1)                  /* use tophys(r1) if kernel */
+       subi    r11, r11, INT_FRAME_SIZE        /* alloc exc. frame */
+#endif
        beq     1f
        mfspr   r11,SPRN_SPRG_THREAD
+       tovirt_vmstack r11, r11
        lwz     r11,TASK_STACK-THREAD(r11)
-       addi    r11,r11,THREAD_SIZE
-       tophys(r11,r11)
-1:     subi    r11,r11,INT_FRAME_SIZE  /* alloc exc. frame */
+       addi    r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+       tophys_novmstack r11, r11
+1:
+#ifdef CONFIG_VMAP_STACK
+       mtcrf   0x7f, r11
+       bt      32 - THREAD_ALIGN_SHIFT, stack_overflow
+#endif
 .endm
 
-.macro EXCEPTION_PROLOG_2
+.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
        stw     r10,_CCR(r11)           /* save registers */
        stw     r12,GPR12(r11)
        stw     r9,GPR9(r11)
        stw     r12,GPR11(r11)
        mflr    r10
        stw     r10,_LINK(r11)
+#ifdef CONFIG_VMAP_STACK
+       mfspr   r12, SPRN_SPRG_THREAD
+       tovirt(r12, r12)
+       .if     \handle_dar_dsisr
+       lwz     r10, DAR(r12)
+       stw     r10, _DAR(r11)
+       lwz     r10, DSISR(r12)
+       stw     r10, _DSISR(r11)
+       .endif
+       lwz     r9, SRR1(r12)
+       lwz     r12, SRR0(r12)
+#else
        mfspr   r12,SPRN_SRR0
        mfspr   r9,SPRN_SRR1
+#endif
        stw     r1,GPR1(r11)
        stw     r1,0(r11)
-       tovirt(r1,r11)                  /* set new kernel sp */
+       tovirt_novmstack r1, r11        /* set new kernel sp */
 #ifdef CONFIG_40x
        rlwinm  r9,r9,0,14,12           /* clear MSR_WE (necessary?) */
+#else
+#ifdef CONFIG_VMAP_STACK
+       li      r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
 #else
        li      r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
-       MTMSRD(r10)                     /* (except for mach check in rtas) */
+#endif
+       mtmsr   r10                     /* (except for mach check in rtas) */
 #endif
        stw     r0,GPR0(r11)
        lis     r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
 
 .macro SYSCALL_ENTRY trapno
        mfspr   r12,SPRN_SPRG_THREAD
+#ifdef CONFIG_VMAP_STACK
+       mfspr   r9, SPRN_SRR0
+       mfspr   r11, SPRN_SRR1
+       stw     r9, SRR0(r12)
+       stw     r11, SRR1(r12)
+#endif
        mfcr    r10
        lwz     r11,TASK_STACK-THREAD(r12)
-       mflr    r9
-       addi    r11,r11,THREAD_SIZE - INT_FRAME_SIZE
        rlwinm  r10,r10,0,4,2   /* Clear SO bit in CR */
-       tophys(r11,r11)
+       addi    r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+#ifdef CONFIG_VMAP_STACK
+       li      r9, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+       mtmsr   r9
+       isync
+#endif
+       tovirt_vmstack r12, r12
+       tophys_novmstack r11, r11
+       mflr    r9
        stw     r10,_CCR(r11)           /* save registers */
+       stw     r9, _LINK(r11)
+#ifdef CONFIG_VMAP_STACK
+       lwz     r10, SRR0(r12)
+       lwz     r9, SRR1(r12)
+#else
        mfspr   r10,SPRN_SRR0
-       stw     r9,_LINK(r11)
        mfspr   r9,SPRN_SRR1
+#endif
        stw     r1,GPR1(r11)
        stw     r1,0(r11)
-       tovirt(r1,r11)                  /* set new kernel sp */
+       tovirt_novmstack r1, r11        /* set new kernel sp */
        stw     r10,_NIP(r11)
 #ifdef CONFIG_40x
        rlwinm  r9,r9,0,14,12           /* clear MSR_WE (necessary?) */
+#else
+#ifdef CONFIG_VMAP_STACK
+       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
 #else
        LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
-       MTMSRD(r10)                     /* (except for mach check in rtas) */
+#endif
+       mtmsr   r10                     /* (except for mach check in rtas) */
 #endif
        lis     r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
        stw     r2,GPR2(r11)
 #endif
 
 3:
-       tovirt(r2, r2)                  /* set r2 to current */
+       tovirt_novmstack r2, r2         /* set r2 to current */
        lis     r11, transfer_to_syscall@h
        ori     r11, r11, transfer_to_syscall@l
 #ifdef CONFIG_TRACE_IRQFLAGS
        RFI                             /* jump to handler, enable MMU */
 .endm
 
+.macro save_dar_dsisr_on_stack reg1, reg2, sp
+#ifndef CONFIG_VMAP_STACK
+       mfspr   \reg1, SPRN_DAR
+       mfspr   \reg2, SPRN_DSISR
+       stw     \reg1, _DAR(\sp)
+       stw     \reg2, _DSISR(\sp)
+#endif
+.endm
+
+.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
+#ifdef CONFIG_VMAP_STACK
+       lwz     \reg1, _DAR(\sp)
+       lwz     \reg2, _DSISR(\sp)
+#else
+       save_dar_dsisr_on_stack \reg1, \reg2, \sp
+#endif
+.endm
+
+.macro tovirt_vmstack dst, src
+#ifdef CONFIG_VMAP_STACK
+       tovirt(\dst, \src)
+#else
+       .ifnc   \dst, \src
+       mr      \dst, \src
+       .endif
+#endif
+.endm
+
+.macro tovirt_novmstack dst, src
+#ifndef CONFIG_VMAP_STACK
+       tovirt(\dst, \src)
+#else
+       .ifnc   \dst, \src
+       mr      \dst, \src
+       .endif
+#endif
+.endm
+
+.macro tophys_novmstack dst, src
+#ifndef CONFIG_VMAP_STACK
+       tophys(\dst, \src)
+#else
+       .ifnc   \dst, \src
+       mr      \dst, \src
+       .endif
+#endif
+.endm
+
 /*
  * Note: code which follows this uses cr0.eq (set if from kernel),
  * r11, r12 (SRR0), and r9 (SRR1).
@@ -187,4 +305,28 @@ label:
        EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
                          ret_from_except)
 
+.macro vmap_stack_overflow_exception
+#ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_SMP
+       mfspr   r11, SPRN_SPRG_THREAD
+       tovirt(r11, r11)
+       lwz     r11, TASK_CPU - THREAD(r11)
+       slwi    r11, r11, 3
+       addis   r11, r11, emergency_ctx@ha
+#else
+       lis     r11, emergency_ctx@ha
+#endif
+       lwz     r11, emergency_ctx@l(r11)
+       cmpwi   cr1, r11, 0
+       bne     cr1, 1f
+       lis     r11, init_thread_union@ha
+       addi    r11, r11, init_thread_union@l
+1:     addi    r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+       EXCEPTION_PROLOG_2
+       SAVE_NVGPRS(r11)
+       addi    r3, r1, STACK_FRAME_OVERHEAD
+       EXC_XFER_STD(0, stack_overflow_exception)
+#endif
+.endm
+
 #endif /* __HEAD_32_H__ */
index 585ea19..9bb6639 100644 (file)
@@ -313,6 +313,7 @@ _ENTRY(saved_ksp_limit)
        START_EXCEPTION(0x0400, InstructionAccess)
        EXCEPTION_PROLOG
        mr      r4,r12                  /* Pass SRR0 as arg2 */
+       stw     r4, _DEAR(r11)
        li      r5,0                    /* Pass zero as arg3 */
        EXC_XFER_LITE(0x400, handle_page_fault)
 
@@ -676,6 +677,7 @@ DataAccess:
        mfspr   r5,SPRN_ESR             /* Grab the ESR, save it, pass arg3 */
        stw     r5,_ESR(r11)
        mfspr   r4,SPRN_DEAR            /* Grab the DEAR, save it, pass arg2 */
+       stw     r4, _DEAR(r11)
        EXC_XFER_LITE(0x300, handle_page_fault)
 
 /* Other PowerPC processors, namely those derived from the 6xx-series
index 19f583e..9922306 100644 (file)
@@ -127,56 +127,36 @@ instruction_counter:
 /* Machine check */
        . = 0x200
 MachineCheck:
-       EXCEPTION_PROLOG
-       mfspr r4,SPRN_DAR
-       stw r4,_DAR(r11)
-       li r5,RPN_PATTERN
-       mtspr SPRN_DAR,r5       /* Tag DAR, to be used in DTLB Error */
-       mfspr r5,SPRN_DSISR
-       stw r5,_DSISR(r11)
+       EXCEPTION_PROLOG handle_dar_dsisr=1
+       save_dar_dsisr_on_stack r4, r5, r11
+       li      r6, RPN_PATTERN
+       mtspr   SPRN_DAR, r6    /* Tag DAR, to be used in DTLB Error */
        addi r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_STD(0x200, machine_check_exception)
 
-/* Data access exception.
- * This is "never generated" by the MPC8xx.
- */
-       . = 0x300
-DataAccess:
-
-/* Instruction access exception.
- * This is "never generated" by the MPC8xx.
- */
-       . = 0x400
-InstructionAccess:
-
 /* External interrupt */
        EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
 
 /* Alignment exception */
        . = 0x600
 Alignment:
-       EXCEPTION_PROLOG
-       mfspr   r4,SPRN_DAR
-       stw     r4,_DAR(r11)
-       li      r5,RPN_PATTERN
-       mtspr   SPRN_DAR,r5     /* Tag DAR, to be used in DTLB Error */
-       mfspr   r5,SPRN_DSISR
-       stw     r5,_DSISR(r11)
+       EXCEPTION_PROLOG handle_dar_dsisr=1
+       save_dar_dsisr_on_stack r4, r5, r11
+       li      r6, RPN_PATTERN
+       mtspr   SPRN_DAR, r6    /* Tag DAR, to be used in DTLB Error */
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       EXC_XFER_STD(0x600, alignment_exception)
+       b       .Lalignment_exception_ool
 
 /* Program check exception */
        EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
 
-/* No FPU on MPC8xx.  This exception is not supposed to happen.
-*/
-       EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
-
 /* Decrementer */
        EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
 
-       EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
+       /* With VMAP_STACK there's not enough room for this at 0x600 */
+       . = 0xa00
+.Lalignment_exception_ool:
+       EXC_XFER_STD(0x600, alignment_exception)
 
 /* System call */
        . = 0xc00
@@ -185,25 +165,12 @@ SystemCall:
 
 /* Single step - not used on 601 */
        EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
-       EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_STD)
 
 /* On the MPC8xx, this is a software emulation interrupt.  It occurs
  * for all unimplemented and illegal instructions.
  */
        EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
 
-/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
-#ifdef CONFIG_PERF_EVENTS
-       patch_site      0f, patch__dtlbmiss_perf
-0:     lwz     r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
-       addi    r10, r10, 1
-       stw     r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
-       mfspr   r10, SPRN_SPRG_SCRATCH0
-       mfspr   r11, SPRN_SPRG_SCRATCH1
-       rfi
-#endif
-
        . = 0x1100
 /*
  * For the MPC8xx, this is a software tablewalk to load the instruction
@@ -343,8 +310,8 @@ ITLBMissLinear:
 
        . = 0x1200
 DataStoreTLBMiss:
-       mtspr   SPRN_SPRG_SCRATCH0, r10
-       mtspr   SPRN_SPRG_SCRATCH1, r11
+       mtspr   SPRN_DAR, r10
+       mtspr   SPRN_M_TW, r11
        mfcr    r11
 
        /* If we are faulting a kernel address, we have to use the
@@ -409,10 +376,10 @@ DataStoreTLBMiss:
        mtspr   SPRN_MD_RPN, r10        /* Update TLB entry */
 
        /* Restore registers */
-       mtspr   SPRN_DAR, r11   /* Tag DAR */
 
-0:     mfspr   r10, SPRN_SPRG_SCRATCH0
-       mfspr   r11, SPRN_SPRG_SCRATCH1
+0:     mfspr   r10, SPRN_DAR
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r11, SPRN_M_TW
        rfi
        patch_site      0b, patch__dtlbmiss_exit_1
 
@@ -428,10 +395,10 @@ DTLBMissIMMR:
        mtspr   SPRN_MD_RPN, r10        /* Update TLB entry */
 
        li      r11, RPN_PATTERN
-       mtspr   SPRN_DAR, r11   /* Tag DAR */
 
-0:     mfspr   r10, SPRN_SPRG_SCRATCH0
-       mfspr   r11, SPRN_SPRG_SCRATCH1
+0:     mfspr   r10, SPRN_DAR
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r11, SPRN_M_TW
        rfi
        patch_site      0b, patch__dtlbmiss_exit_2
 
@@ -465,10 +432,10 @@ DTLBMissLinear:
        mtspr   SPRN_MD_RPN, r10        /* Update TLB entry */
 
        li      r11, RPN_PATTERN
-       mtspr   SPRN_DAR, r11   /* Tag DAR */
 
-0:     mfspr   r10, SPRN_SPRG_SCRATCH0
-       mfspr   r11, SPRN_SPRG_SCRATCH1
+0:     mfspr   r10, SPRN_DAR
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r11, SPRN_M_TW
        rfi
        patch_site      0b, patch__dtlbmiss_exit_3
 
@@ -486,6 +453,7 @@ InstructionTLBError:
        tlbie   r4
        /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
 .Litlbie:
+       stw     r4, _DAR(r11)
        EXC_XFER_LITE(0x400, handle_page_fault)
 
 /* This is the data TLB error on the MPC8xx.  This could be due to
@@ -494,58 +462,69 @@ InstructionTLBError:
  */
        . = 0x1400
 DataTLBError:
-       mtspr   SPRN_SPRG_SCRATCH0, r10
-       mtspr   SPRN_SPRG_SCRATCH1, r11
-       mfcr    r10
-
+       EXCEPTION_PROLOG_0 handle_dar_dsisr=1
        mfspr   r11, SPRN_DAR
-       cmpwi   cr0, r11, RPN_PATTERN
-       beq-    FixupDAR        /* must be a buggy dcbX, icbi insn. */
+       cmpwi   cr1, r11, RPN_PATTERN
+       beq-    cr1, FixupDAR   /* must be a buggy dcbX, icbi insn. */
 DARFixed:/* Return from dcbx instruction bug workaround */
+#ifdef CONFIG_VMAP_STACK
+       li      r11, RPN_PATTERN
+       mtspr   SPRN_DAR, r11   /* Tag DAR, to be used in DTLB Error */
+#endif
        EXCEPTION_PROLOG_1
-       EXCEPTION_PROLOG_2
-       mfspr   r5,SPRN_DSISR
-       stw     r5,_DSISR(r11)
-       mfspr   r4,SPRN_DAR
+       EXCEPTION_PROLOG_2 handle_dar_dsisr=1
+       get_and_save_dar_dsisr_on_stack r4, r5, r11
        andis.  r10,r5,DSISR_NOHPTE@h
        beq+    .Ldtlbie
        tlbie   r4
 .Ldtlbie:
+#ifndef CONFIG_VMAP_STACK
        li      r10,RPN_PATTERN
        mtspr   SPRN_DAR,r10    /* Tag DAR, to be used in DTLB Error */
+#endif
        /* 0x300 is DataAccess exception, needed by bad_page_fault() */
        EXC_XFER_LITE(0x300, handle_page_fault)
 
-       EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
-       EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
+/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
+#ifdef CONFIG_PERF_EVENTS
+       patch_site      0f, patch__dtlbmiss_perf
+0:     lwz     r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+       addi    r10, r10, 1
+       stw     r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+       mfspr   r10, SPRN_DAR
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r11, SPRN_M_TW
+       rfi
+#endif
+
+stack_overflow:
+       vmap_stack_overflow_exception
 
 /* On the MPC8xx, these next four traps are used for development
  * support of breakpoints and such.  Someday I will get around to
  * using them.
  */
-       . = 0x1c00
-DataBreakpoint:
-       mtspr   SPRN_SPRG_SCRATCH0, r10
-       mtspr   SPRN_SPRG_SCRATCH1, r11
-       mfcr    r10
-       mfspr   r11, SPRN_SRR0
-       cmplwi  cr0, r11, (.Ldtlbie - PAGE_OFFSET)@l
-       cmplwi  cr7, r11, (.Litlbie - PAGE_OFFSET)@l
-       beq-    cr0, 11f
-       beq-    cr7, 11f
+do_databreakpoint:
        EXCEPTION_PROLOG_1
-       EXCEPTION_PROLOG_2
+       EXCEPTION_PROLOG_2 handle_dar_dsisr=1
        addi    r3,r1,STACK_FRAME_OVERHEAD
        mfspr   r4,SPRN_BAR
        stw     r4,_DAR(r11)
+#ifdef CONFIG_VMAP_STACK
+       lwz     r5,_DSISR(r11)
+#else
        mfspr   r5,SPRN_DSISR
+#endif
        EXC_XFER_STD(0x1c00, do_break)
-11:
+
+       . = 0x1c00
+DataBreakpoint:
+       EXCEPTION_PROLOG_0 handle_dar_dsisr=1
+       mfspr   r11, SPRN_SRR0
+       cmplwi  cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
+       cmplwi  cr7, r11, (.Litlbie - PAGE_OFFSET)@l
+       cror    4*cr1+eq, 4*cr1+eq, 4*cr7+eq
+       bne     cr1, do_databreakpoint
        mtcr    r10
        mfspr   r10, SPRN_SPRG_SCRATCH0
        mfspr   r11, SPRN_SPRG_SCRATCH1
@@ -581,9 +560,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
        mfspr   r10, SPRN_SRR0
        mtspr   SPRN_MD_EPN, r10
        rlwinm  r11, r10, 16, 0xfff8
-       cmpli   cr0, r11, PAGE_OFFSET@h
+       cmpli   cr1, r11, PAGE_OFFSET@h
        mfspr   r11, SPRN_M_TWB /* Get level 1 table */
-       blt+    3f
+       blt+    cr1, 3f
        rlwinm  r11, r10, 16, 0xfff8
 
 0:     cmpli   cr7, r11, (PAGE_OFFSET + 0x1800000)@h
@@ -598,7 +577,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
 3:
        lwz     r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)        /* Get the level 1 entry */
        mtspr   SPRN_MD_TWC, r11
-       mtcr    r11
+       mtcrf   0x01, r11
        mfspr   r11, SPRN_MD_TWC
        lwz     r11, 0(r11)     /* Get the pte */
        bt      28,200f         /* bit 28 = Large page (8M) */
@@ -611,16 +590,16 @@ FixupDAR:/* Entry point for dcbx workaround. */
  * no need to include them here */
        xoris   r10, r11, 0x7c00        /* check if major OP code is 31 */
        rlwinm  r10, r10, 0, 21, 5
-       cmpwi   cr0, r10, 2028  /* Is dcbz? */
-       beq+    142f
-       cmpwi   cr0, r10, 940   /* Is dcbi? */
-       beq+    142f
-       cmpwi   cr0, r10, 108   /* Is dcbst? */
-       beq+    144f            /* Fix up store bit! */
-       cmpwi   cr0, r10, 172   /* Is dcbf? */
-       beq+    142f
-       cmpwi   cr0, r10, 1964  /* Is icbi? */
-       beq+    142f
+       cmpwi   cr1, r10, 2028  /* Is dcbz? */
+       beq+    cr1, 142f
+       cmpwi   cr1, r10, 940   /* Is dcbi? */
+       beq+    cr1, 142f
+       cmpwi   cr1, r10, 108   /* Is dcbst? */
+       beq+    cr1, 144f               /* Fix up store bit! */
+       cmpwi   cr1, r10, 172   /* Is dcbf? */
+       beq+    cr1, 142f
+       cmpwi   cr1, r10, 1964  /* Is icbi? */
+       beq+    cr1, 142f
 141:   mfspr   r10,SPRN_M_TW
        b       DARFixed        /* Nope, go back to normal TLB processing */
 
@@ -679,8 +658,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
        add     r10, r10, r30   ;b      151f
        add     r10, r10, r31
 151:
-       rlwinm. r11,r11,19,24,28        /* offset into jump table for reg RA */
-       beq     152f                    /* if reg RA is zero, don't add it */
+       rlwinm  r11,r11,19,24,28        /* offset into jump table for reg RA */
+       cmpwi   cr1, r11, 0
+       beq     cr1, 152f               /* if reg RA is zero, don't add it */
        addi    r11, r11, 150b@l        /* add start of table */
        mtctr   r11                     /* load ctr with jump address */
        rlwinm  r11,r11,0,16,10         /* make sure we don't execute this more than once */
@@ -688,7 +668,14 @@ FixupDAR:/* Entry point for dcbx workaround. */
 152:
        mfdar   r11
        mtctr   r11                     /* restore ctr reg from DAR */
+#ifdef CONFIG_VMAP_STACK
+       mfspr   r11, SPRN_SPRG_THREAD
+       stw     r10, DAR(r11)
+       mfspr   r10, SPRN_DSISR
+       stw     r10, DSISR(r11)
+#else
        mtdar   r10                     /* save fault EA to DAR */
+#endif
        mfspr   r10,SPRN_M_TW
        b       DARFixed                /* Go back to normal TLB handling */
 
index 2ae635d..37fc84e 100644 (file)
@@ -467,6 +467,7 @@ label:
        mfspr   r5,SPRN_ESR;            /* Grab the ESR and save it */        \
        stw     r5,_ESR(r11);                                                 \
        mfspr   r4,SPRN_DEAR;           /* Grab the DEAR */                   \
+       stw     r4, _DEAR(r11);                                               \
        EXC_XFER_LITE(0x0300, handle_page_fault)
 
 #define INSTRUCTION_STORAGE_EXCEPTION                                        \
@@ -475,6 +476,7 @@ label:
        mfspr   r5,SPRN_ESR;            /* Grab the ESR and save it */        \
        stw     r5,_ESR(r11);                                                 \
        mr      r4,r12;                 /* Pass SRR0 as arg2 */               \
+       stw     r4, _DEAR(r11);                                               \
        li      r5,0;                   /* Pass zero as arg3 */               \
        EXC_XFER_LITE(0x0400, handle_page_fault)
 
index 6f7a3a7..840af00 100644 (file)
@@ -378,6 +378,7 @@ interrupt_base:
        mfspr   r4,SPRN_DEAR            /* Grab the DEAR, save it, pass arg2 */
        andis.  r10,r5,(ESR_ILK|ESR_DLK)@h
        bne     1f
+       stw     r4, _DEAR(r11)
        EXC_XFER_LITE(0x0300, handle_page_fault)
 1:
        addi    r3,r1,STACK_FRAME_OVERHEAD
index 58ce3d3..2462cd7 100644 (file)
@@ -160,6 +160,9 @@ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
                /* DAWR region can't cross 512 bytes boundary */
                if ((start_addr >> 9) != (end_addr >> 9))
                        return -EINVAL;
+       } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+               /* 8xx can setup a range without limitation */
+               max_len = U16_MAX;
        }
 
        if (hw_len > max_len)
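The DAWR branch above rejects any range that crosses a 512-byte boundary, which is what the shift-by-9 comparison tests; the new 8xx branch instead allows ranges up to U16_MAX bytes. A worked example of the boundary test, with made-up addresses:

    /* Illustrative only: (start >> 9) != (end >> 9) means the range crosses a 512-byte line. */
    unsigned long start_addr = 0x100001f8;	/* 0x100001f8 >> 9 == 0x80000 */
    unsigned long end_addr   = 0x10000208;	/* 0x10000208 >> 9 == 0x80001 -> crosses, -EINVAL */
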
@@ -328,13 +331,11 @@ int hw_breakpoint_handler(struct die_args *args)
        }
 
        info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
-       if (IS_ENABLED(CONFIG_PPC_8xx)) {
-               if (!dar_within_range(regs->dar, info))
-                       info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
-       } else {
-               if (!stepping_handler(regs, bp, info))
-                       goto out;
-       }
+       if (!dar_within_range(regs->dar, info))
+               info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+
+       if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info))
+               goto out;
 
        /*
         * As a policy, the callback is invoked in a 'trigger-after-execute'
index a36fd05..422e31d 100644 (file)
@@ -77,6 +77,31 @@ void arch_cpu_idle(void)
 
 int powersave_nap;
 
+#ifdef CONFIG_PPC_970_NAP
+void power4_idle(void)
+{
+       if (!cpu_has_feature(CPU_FTR_CAN_NAP))
+               return;
+
+       if (!powersave_nap)
+               return;
+
+       if (!prep_irq_for_idle())
+               return;
+
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               asm volatile("DSSALL ; sync" ::: "memory");
+
+       power4_idle_nap();
+
+       /*
+	 * power4_idle_nap returns to our caller with interrupts enabled
+	 * (soft and hard). Our caller can cope with either interrupts
+	 * disabled or enabled upon return.
+        */
+}
+#endif
+
 #ifdef CONFIG_SYSCTL
 /*
  * Register the sysctl to set/clear powersave_nap.
index d327519..22f249b 100644 (file)
@@ -15,7 +15,9 @@
 #include <asm/asm-offsets.h>
 #include <asm/ppc-opcode.h>
 #include <asm/cpuidle.h>
+#include <asm/thread_info.h> /* TLF_NAPPING */
 
+#ifdef CONFIG_PPC_P7_NAP
 /*
  * Desired PSSCR in r3
  *
@@ -181,4 +183,22 @@ _GLOBAL(isa206_idle_insn_mayloss)
        bne     2f
        IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
 2:     IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
+#endif
 
+#ifdef CONFIG_PPC_970_NAP
+_GLOBAL(power4_idle_nap)
+       LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW)
+       ld      r9,PACA_THREAD_INFO(r13)
+       ld      r8,TI_LOCAL_FLAGS(r9)
+       ori     r8,r8,_TLF_NAPPING
+       std     r8,TI_LOCAL_FLAGS(r9)
+       /*
+        * NAPPING bit is set, from this point onward power4_fixup_nap
+        * will cause exceptions to return to power4_idle_nap_return.
+        */
+1:     sync
+       isync
+       mtmsrd  r7
+       isync
+       b       1b
+#endif
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
deleted file mode 100644 (file)
index 33c6253..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *  This file contains the power_save function for 970-family CPUs.
- */
-
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/irqflags.h>
-#include <asm/hw_irq.h>
-#include <asm/feature-fixups.h>
-
-#undef DEBUG
-
-       .text
-
-_GLOBAL(power4_idle)
-BEGIN_FTR_SECTION
-       blr
-END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
-       /* Now check if user or arch enabled NAP mode */
-       LOAD_REG_ADDRBASE(r3,powersave_nap)
-       lwz     r4,ADDROFF(powersave_nap)(r3)
-       cmpwi   0,r4,0
-       beqlr
-
-       /* This sequence is similar to prep_irq_for_idle() */
-
-       /* Hard disable interrupts */
-       mfmsr   r7
-       rldicl  r0,r7,48,1
-       rotldi  r0,r0,16
-       mtmsrd  r0,1
-
-       /* Check if something happened while soft-disabled */
-       lbz     r0,PACAIRQHAPPENED(r13)
-       cmpwi   cr0,r0,0
-       bne-    2f
-
-       /*
-        * Soft-enable interrupts. This will make power4_fixup_nap return
-        * to our caller with interrupts enabled (soft and hard). The caller
-        * can cope with either interrupts disabled or enabled upon return.
-        */
-#ifdef CONFIG_TRACE_IRQFLAGS
-       /* Tell the tracer interrupts are on, because idle responds to them. */
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,-128(r1)
-       bl      trace_hardirqs_on
-       addi    r1,r1,128
-       ld      r0,16(r1)
-       mtlr    r0
-       mfmsr   r7
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
-       li      r0,IRQS_ENABLED
-       stb     r0,PACAIRQSOFTMASK(r13) /* we'll hard-enable shortly */
-BEGIN_FTR_SECTION
-       DSSALL
-       sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-       ld      r9, PACA_THREAD_INFO(r13)
-       ld      r8,TI_LOCAL_FLAGS(r9)   /* set napping bit */
-       ori     r8,r8,_TLF_NAPPING      /* so when we take an exception */
-       std     r8,TI_LOCAL_FLAGS(r9)   /* it will return to our caller */
-       ori     r7,r7,MSR_EE
-       oris    r7,r7,MSR_POW@h
-1:     sync
-       isync
-       mtmsrd  r7
-       isync
-       b       1b
-
-2:     /* Return if an interrupt had happened while soft disabled */
-       /* Set the HARD_DIS flag because interrupts are now hard disabled */
-       ori     r0,r0,PACA_IRQ_HARD_DIS
-       stb     r0,PACAIRQHAPPENED(r13)
-       blr
index add6749..5c9b118 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/debugfs.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/vmalloc.h>
 
 #include <linux/uaccess.h>
 #include <asm/io.h>
@@ -664,8 +665,29 @@ void do_IRQ(struct pt_regs *regs)
        set_irq_regs(old_regs);
 }
 
+static void *__init alloc_vm_stack(void)
+{
+       return __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, VMALLOC_START,
+                                   VMALLOC_END, THREADINFO_GFP, PAGE_KERNEL,
+                                    0, NUMA_NO_NODE, (void*)_RET_IP_);
+}
+
+static void __init vmap_irqstack_init(void)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               softirq_ctx[i] = alloc_vm_stack();
+               hardirq_ctx[i] = alloc_vm_stack();
+       }
+}
+
+
 void __init init_IRQ(void)
 {
+       if (IS_ENABLED(CONFIG_VMAP_STACK))
+               vmap_irqstack_init();
+
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
 }
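With CONFIG_VMAP_STACK the softirq/hardirq stacks above come from the vmalloc area, so each one is followed by a guard page and an overflow faults immediately instead of silently scribbling over an adjacent allocation. If one wanted to sanity-check that at boot, a sketch along these lines would do (is_vmalloc_addr() is the generic helper from linux/mm.h; the function name here is made up):

    #include <linux/mm.h>	/* is_vmalloc_addr() */

    static void __init check_irq_stack(void *stack)
    {
    	WARN_ON(IS_ENABLED(CONFIG_VMAP_STACK) && !is_vmalloc_addr(stack));
    }
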
index 1c448cf..c6c0341 100644 (file)
@@ -261,12 +261,6 @@ int pcibios_sriov_disable(struct pci_dev *pdev)
 
 #endif /* CONFIG_PCI_IOV */
 
-void pcibios_bus_add_device(struct pci_dev *pdev)
-{
-       if (ppc_md.pcibios_bus_add_device)
-               ppc_md.pcibios_bus_add_device(pdev);
-}
-
 static resource_size_t pcibios_io_size(const struct pci_controller *hose)
 {
 #ifdef CONFIG_PPC64
@@ -964,7 +958,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
                phb->controller_ops.dma_bus_setup(bus);
 }
 
-static void pcibios_setup_device(struct pci_dev *dev)
+void pcibios_bus_add_device(struct pci_dev *dev)
 {
        struct pci_controller *phb;
        /* Fixup NUMA node as it may not be setup yet by the generic
@@ -985,17 +979,13 @@ static void pcibios_setup_device(struct pci_dev *dev)
        pci_read_irq_line(dev);
        if (ppc_md.pci_irq_fixup)
                ppc_md.pci_irq_fixup(dev);
+
+       if (ppc_md.pcibios_bus_add_device)
+               ppc_md.pcibios_bus_add_device(dev);
 }
 
 int pcibios_add_device(struct pci_dev *dev)
 {
-       /*
-        * We can only call pcibios_setup_device() after bus setup is complete,
-        * since some of the platform specific DMA setup code depends on it.
-        */
-       if (dev->bus->is_added)
-               pcibios_setup_device(dev);
-
 #ifdef CONFIG_PCI_IOV
        if (ppc_md.pcibios_fixup_sriov)
                ppc_md.pcibios_fixup_sriov(dev);
@@ -1004,24 +994,6 @@ int pcibios_add_device(struct pci_dev *dev)
        return 0;
 }
 
-void pcibios_setup_bus_devices(struct pci_bus *bus)
-{
-       struct pci_dev *dev;
-
-       pr_debug("PCI: Fixup bus devices %d (%s)\n",
-                bus->number, bus->self ? pci_name(bus->self) : "PHB");
-
-       list_for_each_entry(dev, &bus->devices, bus_list) {
-               /* Cardbus can call us to add new devices to a bus, so ignore
-                * those who are already fully discovered
-                */
-               if (pci_dev_is_added(dev))
-                       continue;
-
-               pcibios_setup_device(dev);
-       }
-}
-
 void pcibios_set_master(struct pci_dev *dev)
 {
        /* No special bus mastering setup handling */
@@ -1037,19 +1009,9 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 
        /* Now fixup the bus bus */
        pcibios_setup_bus_self(bus);
-
-       /* Now fixup devices on that bus */
-       pcibios_setup_bus_devices(bus);
 }
 EXPORT_SYMBOL(pcibios_fixup_bus);
 
-void pci_fixup_cardbus(struct pci_bus *bus)
-{
-       /* Now fixup devices on that bus */
-       pcibios_setup_bus_devices(bus);
-}
-
-
 static int skip_isa_ioresource_align(struct pci_dev *dev)
 {
        if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
index fc62c4b..d6a67f8 100644 (file)
@@ -134,7 +134,6 @@ void pci_hp_add_devices(struct pci_bus *bus)
                 */
                slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
                pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
-               pcibios_setup_bus_devices(bus);
                max = bus->busn_res.start;
                /*
                 * Scan bridges that are already configured. We don't touch
index 9524009..4e654df 100644 (file)
@@ -125,7 +125,7 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PCI_IOV
-static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
+static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent,
                                           int vf_index,
                                           int busno, int devfn)
 {
@@ -151,17 +151,15 @@ static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
 
        return pdn;
 }
-#endif
 
-struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
+struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PCI_IOV
        struct pci_dn *parent, *pdn;
        int i;
 
        /* Only support IOV for now */
-       if (!pdev->is_physfn)
-               return pci_get_pdn(pdev);
+       if (WARN_ON(!pdev->is_physfn))
+               return NULL;
 
        /* Check if VFs have been populated */
        pdn = pci_get_pdn(pdev);
@@ -176,7 +174,7 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
        for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
                struct eeh_dev *edev __maybe_unused;
 
-               pdn = add_one_dev_pci_data(parent, i,
+               pdn = add_one_sriov_vf_pdn(parent, i,
                                           pci_iov_virtfn_bus(pdev, i),
                                           pci_iov_virtfn_devfn(pdev, i));
                if (!pdn) {
@@ -192,31 +190,17 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
                edev->physfn = pdev;
 #endif /* CONFIG_EEH */
        }
-#endif /* CONFIG_PCI_IOV */
-
        return pci_get_pdn(pdev);
 }
 
-void remove_dev_pci_data(struct pci_dev *pdev)
+void remove_sriov_vf_pdns(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PCI_IOV
        struct pci_dn *parent;
        struct pci_dn *pdn, *tmp;
        int i;
 
-       /*
-        * VF and VF PE are created/released dynamically, so we need to
-        * bind/unbind them.  Otherwise the VF and VF PE would be mismatched
-        * when re-enabling SR-IOV.
-        */
-       if (pdev->is_virtfn) {
-               pdn = pci_get_pdn(pdev);
-               pdn->pe_number = IODA_INVALID_PE;
-               return;
-       }
-
        /* Only support IOV PF for now */
-       if (!pdev->is_physfn)
+       if (WARN_ON(!pdev->is_physfn))
                return;
 
        /* Check if VFs have been populated */
@@ -244,9 +228,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
                                continue;
 
 #ifdef CONFIG_EEH
-                       /* Release EEH device for the VF */
+                       /*
+                        * Release EEH state for this VF. The PCI core
+			 * we're responsible for removing the eeh_dev since it
+                        * we're responsible to removing the eeh_dev since it
+                        * has the same lifetime as the pci_dn that spawned it.
+                        */
                        edev = pdn_to_eeh_dev(pdn);
                        if (edev) {
+                               /*
+                                * We allocate pci_dn's for the totalvfs count,
+				 * but only the VFs that were activated
+                                * have a configured PE.
+                                */
+                               if (edev->pe)
+                                       eeh_rmv_from_parent_pe(edev);
+
                                pdn->edev = NULL;
                                kfree(edev);
                        }
@@ -258,8 +255,8 @@ void remove_dev_pci_data(struct pci_dev *pdev)
                        kfree(pdn);
                }
        }
-#endif /* CONFIG_PCI_IOV */
 }
+#endif /* CONFIG_PCI_IOV */
 
 struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
                                        struct device_node *dn)
index f91d7e9..c3024f1 100644 (file)
@@ -414,7 +414,6 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
         */
        if (!rescan_existing)
                pcibios_setup_bus_self(bus);
-       pcibios_setup_bus_devices(bus);
 
        /* Now scan child busses */
        for_each_pci_bridge(dev, bus)
index be3758d..8778174 100644 (file)
@@ -39,10 +39,10 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
        return 0;
 }
 
-static const struct file_operations page_map_fops = {
-       .llseek = page_map_seek,
-       .read   = page_map_read,
-       .mmap   = page_map_mmap
+static const struct proc_ops page_map_proc_ops = {
+       .proc_lseek     = page_map_seek,
+       .proc_read      = page_map_read,
+       .proc_mmap      = page_map_mmap,
 };
 
 
@@ -51,7 +51,7 @@ static int __init proc_ppc64_init(void)
        struct proc_dir_entry *pde;
 
        pde = proc_create_data("powerpc/systemcfg", S_IFREG | 0444, NULL,
-                              &page_map_fops, vdso_data);
+                              &page_map_proc_ops, vdso_data);
        if (!pde)
                return 1;
        proc_set_size(pde, PAGE_SIZE);
index 4df94b6..fad50db 100644 (file)
@@ -740,28 +740,6 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
                mtspr(SPRN_DABRX, dabrx);
        return 0;
 }
-#elif defined(CONFIG_PPC_8xx)
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
-{
-       unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
-       unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
-       unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
-
-       if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
-               lctrl1 |= 0xa0000;
-       else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
-               lctrl1 |= 0xf0000;
-       else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
-               lctrl2 = 0;
-
-       mtspr(SPRN_LCTRL2, 0);
-       mtspr(SPRN_CMPE, addr);
-       mtspr(SPRN_CMPF, addr + 4);
-       mtspr(SPRN_LCTRL1, lctrl1);
-       mtspr(SPRN_LCTRL2, lctrl2);
-
-       return 0;
-}
 #else
 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 {
@@ -782,6 +760,39 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
        return __set_dabr(dabr, dabrx);
 }
 
+static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
+{
+       unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
+                              LCTRL1_CRWF_RW;
+       unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
+       unsigned long start_addr = brk->address & ~HW_BREAKPOINT_ALIGN;
+       unsigned long end_addr = (brk->address + brk->len - 1) | HW_BREAKPOINT_ALIGN;
+
+       if (start_addr == 0)
+               lctrl2 |= LCTRL2_LW0LA_F;
+       else if (end_addr == ~0U)
+               lctrl2 |= LCTRL2_LW0LA_E;
+       else
+               lctrl2 |= LCTRL2_LW0LA_EandF;
+
+       mtspr(SPRN_LCTRL2, 0);
+
+       if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
+               return 0;
+
+       if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
+               lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
+       if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
+               lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
+
+       mtspr(SPRN_CMPE, start_addr - 1);
+       mtspr(SPRN_CMPF, end_addr + 1);
+       mtspr(SPRN_LCTRL1, lctrl1);
+       mtspr(SPRN_LCTRL2, lctrl2);
+
+       return 0;
+}
+
 void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
        memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
@@ -789,6 +800,8 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
        if (dawr_enabled())
                // Power8 or later
                set_dawr(brk);
+       else if (IS_ENABLED(CONFIG_PPC_8xx))
+               set_breakpoint_8xx(brk);
        else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
                // Power7 or earlier
                set_dabr(brk);
@@ -1264,16 +1277,6 @@ void show_user_instructions(struct pt_regs *regs)
 
        pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
 
-       /*
-        * Make sure the NIP points at userspace, not kernel text/data or
-        * elsewhere.
-        */
-       if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
-               pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
-                       current->comm, current->pid);
-               return;
-       }
-
        seq_buf_init(&s, buf, sizeof(buf));
 
        while (n) {
@@ -1284,7 +1287,7 @@ void show_user_instructions(struct pt_regs *regs)
                for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
                        int instr;
 
-                       if (probe_kernel_address((const void *)pc, instr)) {
+                       if (probe_user_read(&instr, (void __user *)pc, sizeof(instr))) {
                                seq_buf_printf(&s, "XXXXXXXX ");
                                continue;
                        }
index 487dcd8..2d33f34 100644 (file)
@@ -159,12 +159,12 @@ static int poweron_open(struct inode *inode, struct file *file)
        return single_open(file, ppc_rtas_poweron_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_poweron_operations = {
-       .open           = poweron_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = ppc_rtas_poweron_write,
-       .release        = single_release,
+static const struct proc_ops ppc_rtas_poweron_proc_ops = {
+       .proc_open      = poweron_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = ppc_rtas_poweron_write,
+       .proc_release   = single_release,
 };
 
 static int progress_open(struct inode *inode, struct file *file)
@@ -172,12 +172,12 @@ static int progress_open(struct inode *inode, struct file *file)
        return single_open(file, ppc_rtas_progress_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_progress_operations = {
-       .open           = progress_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = ppc_rtas_progress_write,
-       .release        = single_release,
+static const struct proc_ops ppc_rtas_progress_proc_ops = {
+       .proc_open      = progress_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = ppc_rtas_progress_write,
+       .proc_release   = single_release,
 };
 
 static int clock_open(struct inode *inode, struct file *file)
@@ -185,12 +185,12 @@ static int clock_open(struct inode *inode, struct file *file)
        return single_open(file, ppc_rtas_clock_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_clock_operations = {
-       .open           = clock_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = ppc_rtas_clock_write,
-       .release        = single_release,
+static const struct proc_ops ppc_rtas_clock_proc_ops = {
+       .proc_open      = clock_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = ppc_rtas_clock_write,
+       .proc_release   = single_release,
 };
 
 static int tone_freq_open(struct inode *inode, struct file *file)
@@ -198,12 +198,12 @@ static int tone_freq_open(struct inode *inode, struct file *file)
        return single_open(file, ppc_rtas_tone_freq_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_tone_freq_operations = {
-       .open           = tone_freq_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = ppc_rtas_tone_freq_write,
-       .release        = single_release,
+static const struct proc_ops ppc_rtas_tone_freq_proc_ops = {
+       .proc_open      = tone_freq_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = ppc_rtas_tone_freq_write,
+       .proc_release   = single_release,
 };
 
 static int tone_volume_open(struct inode *inode, struct file *file)
@@ -211,12 +211,12 @@ static int tone_volume_open(struct inode *inode, struct file *file)
        return single_open(file, ppc_rtas_tone_volume_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_tone_volume_operations = {
-       .open           = tone_volume_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = ppc_rtas_tone_volume_write,
-       .release        = single_release,
+static const struct proc_ops ppc_rtas_tone_volume_proc_ops = {
+       .proc_open      = tone_volume_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = ppc_rtas_tone_volume_write,
+       .proc_release   = single_release,
 };
 
 static int ppc_rtas_find_all_sensors(void);
@@ -238,17 +238,17 @@ static int __init proc_rtas_init(void)
                return -ENODEV;
 
        proc_create("powerpc/rtas/progress", 0644, NULL,
-                   &ppc_rtas_progress_operations);
+                   &ppc_rtas_progress_proc_ops);
        proc_create("powerpc/rtas/clock", 0644, NULL,
-                   &ppc_rtas_clock_operations);
+                   &ppc_rtas_clock_proc_ops);
        proc_create("powerpc/rtas/poweron", 0644, NULL,
-                   &ppc_rtas_poweron_operations);
+                   &ppc_rtas_poweron_proc_ops);
        proc_create_single("powerpc/rtas/sensors", 0444, NULL,
                        ppc_rtas_sensors_show);
        proc_create("powerpc/rtas/frequency", 0644, NULL,
-                   &ppc_rtas_tone_freq_operations);
+                   &ppc_rtas_tone_freq_proc_ops);
        proc_create("powerpc/rtas/volume", 0644, NULL,
-                   &ppc_rtas_tone_volume_operations);
+                   &ppc_rtas_tone_volume_proc_ops);
        proc_create_single("powerpc/rtas/rmo_buffer", 0400, NULL,
                        ppc_rtas_rmo_buf_show);
        return 0;
index 84f7947..a99179d 100644 (file)
@@ -655,7 +655,7 @@ struct rtas_flash_file {
        const char *filename;
        const char *rtas_call_name;
        int *status;
-       const struct file_operations fops;
+       const struct proc_ops ops;
 };
 
 static const struct rtas_flash_file rtas_flash_files[] = {
@@ -663,36 +663,36 @@ static const struct rtas_flash_file rtas_flash_files[] = {
                .filename       = "powerpc/rtas/" FIRMWARE_FLASH_NAME,
                .rtas_call_name = "ibm,update-flash-64-and-reboot",
                .status         = &rtas_update_flash_data.status,
-               .fops.read      = rtas_flash_read_msg,
-               .fops.write     = rtas_flash_write,
-               .fops.release   = rtas_flash_release,
-               .fops.llseek    = default_llseek,
+               .ops.proc_read  = rtas_flash_read_msg,
+               .ops.proc_write = rtas_flash_write,
+               .ops.proc_release = rtas_flash_release,
+               .ops.proc_lseek = default_llseek,
        },
        {
                .filename       = "powerpc/rtas/" FIRMWARE_UPDATE_NAME,
                .rtas_call_name = "ibm,update-flash-64-and-reboot",
                .status         = &rtas_update_flash_data.status,
-               .fops.read      = rtas_flash_read_num,
-               .fops.write     = rtas_flash_write,
-               .fops.release   = rtas_flash_release,
-               .fops.llseek    = default_llseek,
+               .ops.proc_read  = rtas_flash_read_num,
+               .ops.proc_write = rtas_flash_write,
+               .ops.proc_release = rtas_flash_release,
+               .ops.proc_lseek = default_llseek,
        },
        {
                .filename       = "powerpc/rtas/" VALIDATE_FLASH_NAME,
                .rtas_call_name = "ibm,validate-flash-image",
                .status         = &rtas_validate_flash_data.status,
-               .fops.read      = validate_flash_read,
-               .fops.write     = validate_flash_write,
-               .fops.release   = validate_flash_release,
-               .fops.llseek    = default_llseek,
+               .ops.proc_read  = validate_flash_read,
+               .ops.proc_write = validate_flash_write,
+               .ops.proc_release = validate_flash_release,
+               .ops.proc_lseek = default_llseek,
        },
        {
                .filename       = "powerpc/rtas/" MANAGE_FLASH_NAME,
                .rtas_call_name = "ibm,manage-flash-image",
                .status         = &rtas_manage_flash_data.status,
-               .fops.read      = manage_flash_read,
-               .fops.write     = manage_flash_write,
-               .fops.llseek    = default_llseek,
+               .ops.proc_read  = manage_flash_read,
+               .ops.proc_write = manage_flash_write,
+               .ops.proc_lseek = default_llseek,
        }
 };
 
@@ -723,7 +723,7 @@ static int __init rtas_flash_init(void)
                const struct rtas_flash_file *f = &rtas_flash_files[i];
                int token;
 
-               if (!proc_create(f->filename, 0600, NULL, &f->fops))
+               if (!proc_create(f->filename, 0600, NULL, &f->ops))
                        goto enomem;
 
                /*
index 8d02e04..89b798f 100644 (file)
@@ -385,12 +385,12 @@ static __poll_t rtas_log_poll(struct file *file, poll_table * wait)
        return 0;
 }
 
-static const struct file_operations proc_rtas_log_operations = {
-       .read =         rtas_log_read,
-       .poll =         rtas_log_poll,
-       .open =         rtas_log_open,
-       .release =      rtas_log_release,
-       .llseek =       noop_llseek,
+static const struct proc_ops rtas_log_proc_ops = {
+       .proc_read      = rtas_log_read,
+       .proc_poll      = rtas_log_poll,
+       .proc_open      = rtas_log_open,
+       .proc_release   = rtas_log_release,
+       .proc_lseek     = noop_llseek,
 };
 
 static int enable_surveillance(int timeout)
@@ -572,7 +572,7 @@ static int __init rtas_init(void)
                return -ENODEV;
 
        entry = proc_create("powerpc/rtas/error_log", 0400, NULL,
-                           &proc_rtas_log_operations);
+                           &rtas_log_proc_ops);
        if (!entry)
                printk(KERN_ERR "Failed to create error_log proc entry\n");
 
index c82577c..2dd0d9c 100644 (file)
@@ -35,7 +35,7 @@ void exc_lvl_early_init(void);
 static inline void exc_lvl_early_init(void) { };
 #endif
 
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) || defined(CONFIG_VMAP_STACK)
 void emergency_stack_init(void);
 #else
 static inline void emergency_stack_init(void) { };
index dcffe92..5b49b26 100644 (file)
@@ -140,7 +140,7 @@ arch_initcall(ppc_init);
 
 static void *__init alloc_stack(void)
 {
-       void *ptr = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+       void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN);
 
        if (!ptr)
                panic("cannot allocate %d bytes for stack at %pS\n",
@@ -153,6 +153,9 @@ void __init irqstack_early_init(void)
 {
        unsigned int i;
 
+       if (IS_ENABLED(CONFIG_VMAP_STACK))
+               return;
+
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
         * as the memblock is limited to lowmem by default */
        for_each_possible_cpu(i) {
@@ -161,6 +164,18 @@ void __init irqstack_early_init(void)
        }
 }
 
+#ifdef CONFIG_VMAP_STACK
+void *emergency_ctx[NR_CPUS] __ro_after_init;
+
+void __init emergency_stack_init(void)
+{
+       unsigned int i;
+
+       for_each_possible_cpu(i)
+               emergency_ctx[i] = alloc_stack();
+}
+#endif
+
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 void __init exc_lvl_early_init(void)
 {
index 6104917..e05e6dd 100644 (file)
@@ -633,7 +633,7 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 
        BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
 
-       ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
+       ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
                                     MEMBLOCK_LOW_LIMIT, limit,
                                     early_cpu_to_node(cpu));
        if (!ptr)
index 014ff07..82a3438 100644 (file)
@@ -1637,6 +1637,15 @@ void StackOverflow(struct pt_regs *regs)
        panic("kernel stack overflow");
 }
 
+void stack_overflow_exception(struct pt_regs *regs)
+{
+       enum ctx_state prev_state = exception_enter();
+
+       die("Kernel stack overflow", regs, SIGSEGV);
+
+       exception_exit(prev_state);
+}
+
 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
index eae9dda..b9a1084 100644 (file)
@@ -728,11 +728,6 @@ static int __init vdso_init(void)
         */
        vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
        DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
-#else
-       vdso_data->dcache_block_size = L1_CACHE_BYTES;
-       vdso_data->dcache_log_block_size = L1_CACHE_SHIFT;
-       vdso_data->icache_block_size = L1_CACHE_BYTES;
-       vdso_data->icache_log_block_size = L1_CACHE_SHIFT;
 #endif /* CONFIG_PPC64 */
 
 
index 06f54d9..e147bbd 100644 (file)
@@ -2,9 +2,7 @@
 
 # List of files in the vdso, has to be asm only for now
 
-obj-vdso32-$(CONFIG_PPC64) = getcpu.o
-obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o \
-               $(obj-vdso32-y)
+obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o
 
 # Build rules
 
index 7f882e7..3440ddf 100644 (file)
@@ -8,7 +8,9 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
 #include <asm/asm-offsets.h>
+#include <asm/cache.h>
 
        .text
 
  */
 V_FUNCTION_BEGIN(__kernel_sync_dicache)
   .cfi_startproc
+#ifdef CONFIG_PPC64
        mflr    r12
   .cfi_register lr,r12
-       mr      r11,r3
-       bl      __get_datapage@local
+       get_datapage    r10, r0
        mtlr    r12
-       mr      r10,r3
+#endif
 
+#ifdef CONFIG_PPC64
        lwz     r7,CFG_DCACHE_BLOCKSZ(r10)
        addi    r5,r7,-1
-       andc    r6,r11,r5               /* round low to line bdy */
+#else
+       li      r5, L1_CACHE_BYTES - 1
+#endif
+       andc    r6,r3,r5                /* round low to line bdy */
        subf    r8,r6,r4                /* compute length */
        add     r8,r8,r5                /* ensure we get enough */
+#ifdef CONFIG_PPC64
        lwz     r9,CFG_DCACHE_LOGBLOCKSZ(r10)
        srw.    r8,r8,r9                /* compute line count */
+#else
+       srwi.   r8, r8, L1_CACHE_SHIFT
+       mr      r7, r6
+#endif
        crclr   cr0*4+so
        beqlr                           /* nothing to do? */
        mtctr   r8
 1:     dcbst   0,r6
+#ifdef CONFIG_PPC64
        add     r6,r6,r7
+#else
+       addi    r6, r6, L1_CACHE_BYTES
+#endif
        bdnz    1b
        sync
 
 /* Now invalidate the instruction cache */
 
+#ifdef CONFIG_PPC64
        lwz     r7,CFG_ICACHE_BLOCKSZ(r10)
        addi    r5,r7,-1
-       andc    r6,r11,r5               /* round low to line bdy */
+       andc    r6,r3,r5                /* round low to line bdy */
        subf    r8,r6,r4                /* compute length */
        add     r8,r8,r5
        lwz     r9,CFG_ICACHE_LOGBLOCKSZ(r10)
        srw.    r8,r8,r9                /* compute line count */
        crclr   cr0*4+so
        beqlr                           /* nothing to do? */
+#endif
        mtctr   r8
+#ifdef CONFIG_PPC64
 2:     icbi    0,r6
        add     r6,r6,r7
+#else
+2:     icbi    0, r7
+       addi    r7, r7, L1_CACHE_BYTES
+#endif
        bdnz    2b
        isync
        li      r3,0
index 6c7401b..217bb63 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
 
        .text
        .global __kernel_datapage_offset;
 __kernel_datapage_offset:
        .long   0
 
-V_FUNCTION_BEGIN(__get_datapage)
-  .cfi_startproc
-       /* We don't want that exposed or overridable as we want other objects
-        * to be able to bl directly to here
-        */
-       .protected __get_datapage
-       .hidden __get_datapage
-
-       mflr    r0
-  .cfi_register lr,r0
-
-       bcl     20,31,data_page_branch
-data_page_branch:
-       mflr    r3
-       mtlr    r0
-       addi    r3, r3, __kernel_datapage_offset-data_page_branch
-       lwz     r0,0(r3)
-  .cfi_restore lr
-       add     r3,r0,r3
-       blr
-  .cfi_endproc
-V_FUNCTION_END(__get_datapage)
-
 /*
  * void *__kernel_get_syscall_map(unsigned int *syscall_count) ;
  *
@@ -52,11 +30,10 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
   .cfi_startproc
        mflr    r12
   .cfi_register lr,r12
-       mr      r4,r3
-       bl      __get_datapage@local
+       mr.     r4,r3
+       get_datapage    r3, r0
        mtlr    r12
        addi    r3,r3,CFG_SYSCALL_MAP32
-       cmpli   cr0,r4,0
        beqlr
        li      r0,NR_syscalls
        stw     r0,0(r4)
@@ -75,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
   .cfi_startproc
        mflr    r12
   .cfi_register lr,r12
-       bl      __get_datapage@local
+       get_datapage    r3, r0
        lwz     r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
        lwz     r3,CFG_TB_TICKS_PER_SEC(r3)
        mtlr    r12
index 63e9145..ff5e214 100644 (file)
@@ -15,6 +15,7 @@
  * int __kernel_getcpu(unsigned *cpu, unsigned *node);
  *
  */
+#if defined(CONFIG_PPC64)
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
        mfspr   r5,SPRN_SPRG_VDSO_READ
@@ -24,10 +25,26 @@ V_FUNCTION_BEGIN(__kernel_getcpu)
        rlwinm  r7,r5,16,31-15,31-0
        beq     cr0,1f
        stw     r6,0(r3)
-1:     beq     cr1,2f
-       stw     r7,0(r4)
-2:     crclr   cr0*4+so
+1:     crclr   cr0*4+so
        li      r3,0                    /* always success */
+       beqlr   cr1
+       stw     r7,0(r4)
+       blr
+  .cfi_endproc
+V_FUNCTION_END(__kernel_getcpu)
+#elif !defined(CONFIG_SMP)
+V_FUNCTION_BEGIN(__kernel_getcpu)
+  .cfi_startproc
+       cmpwi   cr0, r3, 0
+       cmpwi   cr1, r4, 0
+       li      r5, 0
+       beq     cr0, 1f
+       stw     r5, 0(r3)
+1:     li      r3, 0                   /* always success */
+       crclr   cr0*4+so
+       beqlr   cr1
+       stw     r5, 0(r4)
        blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_getcpu)
+#endif
index 3306672..a395156 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 
@@ -31,28 +32,26 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday)
        mflr    r12
   .cfi_register lr,r12
 
-       mr      r10,r3                  /* r10 saves tv */
+       mr.     r10,r3                  /* r10 saves tv */
        mr      r11,r4                  /* r11 saves tz */
-       bl      __get_datapage@local    /* get data page */
-       mr      r9, r3                  /* datapage ptr in r9 */
-       cmplwi  r10,0                   /* check if tv is NULL */
+       get_datapage    r9, r0
        beq     3f
-       lis     r7,1000000@ha           /* load up USEC_PER_SEC */
-       addi    r7,r7,1000000@l         /* so we get microseconds in r4 */
+       LOAD_REG_IMMEDIATE(r7, 1000000) /* load up USEC_PER_SEC */
        bl      __do_get_tspec@local    /* get sec/usec from tb & kernel */
        stw     r3,TVAL32_TV_SEC(r10)
        stw     r4,TVAL32_TV_USEC(r10)
 
 3:     cmplwi  r11,0                   /* check if tz is NULL */
-       beq     1f
+       mtlr    r12
+       crclr   cr0*4+so
+       li      r3,0
+       beqlr
+
        lwz     r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
        lwz     r5,CFG_TZ_DSTTIME(r9)
        stw     r4,TZONE_TZ_MINWEST(r11)
        stw     r5,TZONE_TZ_DSTTIME(r11)
 
-1:     mtlr    r12
-       crclr   cr0*4+so
-       li      r3,0
        blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_gettimeofday)
@@ -69,17 +68,23 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
        cmpli   cr0,r3,CLOCK_REALTIME
        cmpli   cr1,r3,CLOCK_MONOTONIC
        cror    cr0*4+eq,cr0*4+eq,cr1*4+eq
-       bne     cr0,99f
+
+       cmpli   cr5,r3,CLOCK_REALTIME_COARSE
+       cmpli   cr6,r3,CLOCK_MONOTONIC_COARSE
+       cror    cr5*4+eq,cr5*4+eq,cr6*4+eq
+
+       cror    cr0*4+eq,cr0*4+eq,cr5*4+eq
+       bne     cr0, .Lgettime_fallback
 
        mflr    r12                     /* r12 saves lr */
   .cfi_register lr,r12
        mr      r11,r4                  /* r11 saves tp */
-       bl      __get_datapage@local    /* get data page */
-       mr      r9,r3                   /* datapage ptr in r9 */
-       lis     r7,NSEC_PER_SEC@h       /* want nanoseconds */
-       ori     r7,r7,NSEC_PER_SEC@l
-50:    bl      __do_get_tspec@local    /* get sec/nsec from tb & kernel */
-       bne     cr1,80f                 /* not monotonic -> all done */
+       get_datapage    r9, r0
+       LOAD_REG_IMMEDIATE(r7, NSEC_PER_SEC)    /* load up NSEC_PER_SEC */
+       beq     cr5, .Lcoarse_clocks
+.Lprecise_clocks:
+       bl      __do_get_tspec@local    /* get sec/nsec from tb & kernel */
+       bne     cr1, .Lfinish           /* not monotonic -> all done */
 
        /*
         * CLOCK_MONOTONIC
@@ -103,12 +108,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
        add     r9,r9,r0
        lwz     r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
         cmpl    cr0,r8,r0              /* check if updated */
-       bne-    50b
+       bne-    .Lprecise_clocks
+       b       .Lfinish_monotonic
+
+       /*
+        * For coarse clocks we get data directly from the vdso data page, so
+        * we don't need to call __do_get_tspec, but we still need to do the
+        * counter trick.
+        */
+.Lcoarse_clocks:
+       lwz     r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
+       andi.   r0,r8,1                 /* pending update ? loop */
+       bne-    .Lcoarse_clocks
+       add     r9,r9,r0                /* r0 is already 0 */
+
+       /*
+        * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
+        * too
+        */
+       lwz     r3,STAMP_XTIME_SEC+LOPART(r9)
+       lwz     r4,STAMP_XTIME_NSEC+LOPART(r9)
+       bne     cr6,1f
+
+       /* CLOCK_MONOTONIC_COARSE */
+       lwz     r5,(WTOM_CLOCK_SEC+LOPART)(r9)
+       lwz     r6,WTOM_CLOCK_NSEC(r9)
+
+       /* check if counter has updated */
+       or      r0,r6,r5
+1:     or      r0,r0,r3
+       or      r0,r0,r4
+       xor     r0,r0,r0
+       add     r3,r3,r0
+       lwz     r0,CFG_TB_UPDATE_COUNT+LOPART(r9)
+       cmpl    cr0,r0,r8               /* check if updated */
+       bne-    .Lcoarse_clocks
+
+       /* Counter has not updated, so continue calculating proper values for
+        * sec and nsec if monotonic coarse, or just return with the proper
+        * values for realtime.
+        */
+       bne     cr6, .Lfinish
 
        /* Calculate and store result. Note that this mimics the C code,
         * which may cause funny results if nsec goes negative... is that
         * possible at all?
         */
+.Lfinish_monotonic:
        add     r3,r3,r5
        add     r4,r4,r6
        cmpw    cr0,r4,r7
@@ -116,11 +162,12 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
        blt     1f
        subf    r4,r7,r4
        addi    r3,r3,1
-1:     bge     cr1,80f
+1:     bge     cr1, .Lfinish
        addi    r3,r3,-1
        add     r4,r4,r7
 
-80:    stw     r3,TSPC32_TV_SEC(r11)
+.Lfinish:
+       stw     r3,TSPC32_TV_SEC(r11)
        stw     r4,TSPC32_TV_NSEC(r11)
 
        mtlr    r12
@@ -131,7 +178,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
        /*
         * syscall fallback
         */
-99:
+.Lgettime_fallback:
        li      r0,__NR_clock_gettime
   .cfi_restore lr
        sc
@@ -149,17 +196,20 @@ V_FUNCTION_END(__kernel_clock_gettime)
 V_FUNCTION_BEGIN(__kernel_clock_getres)
   .cfi_startproc
        /* Check for supported clock IDs */
-       cmpwi   cr0,r3,CLOCK_REALTIME
-       cmpwi   cr1,r3,CLOCK_MONOTONIC
-       cror    cr0*4+eq,cr0*4+eq,cr1*4+eq
-       bne     cr0,99f
+       cmplwi  cr0, r3, CLOCK_MAX
+       cmpwi   cr1, r3, CLOCK_REALTIME_COARSE
+       cmpwi   cr7, r3, CLOCK_MONOTONIC_COARSE
+       bgt     cr0, 99f
+       LOAD_REG_IMMEDIATE(r5, KTIME_LOW_RES)
+       beq     cr1, 1f
+       beq     cr7, 1f
 
        mflr    r12
   .cfi_register lr,r12
-       bl      __get_datapage@local    /* get data page */
+       get_datapage    r3, r0
        lwz     r5, CLOCK_HRTIMER_RES(r3)
        mtlr    r12
-       li      r3,0
+1:     li      r3,0
        cmpli   cr0,r4,0
        crclr   cr0*4+so
        beqlr
@@ -168,11 +218,11 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
        blr
 
        /*
-        * syscall fallback
+        * invalid clock
         */
 99:
-       li      r0,__NR_clock_getres
-       sc
+       li      r3, EINVAL
+       crset   so
        blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_clock_getres)
@@ -190,16 +240,15 @@ V_FUNCTION_BEGIN(__kernel_time)
   .cfi_register lr,r12
 
        mr      r11,r3                  /* r11 holds t */
-       bl      __get_datapage@local
-       mr      r9, r3                  /* datapage ptr in r9 */
+       get_datapage    r9, r0
 
        lwz     r3,STAMP_XTIME_SEC+LOPART(r9)
 
        cmplwi  r11,0                   /* check if t is NULL */
-       beq     2f
-       stw     r3,0(r11)               /* store result at *t */
-2:     mtlr    r12
+       mtlr    r12
        crclr   cr0*4+so
+       beqlr
+       stw     r3,0(r11)               /* store result at *t */
        blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_time)
index 00c025b..5206c2e 100644 (file)
@@ -155,7 +155,7 @@ VERSION
                __kernel_sync_dicache_p5;
                __kernel_sigtramp32;
                __kernel_sigtramp_rt32;
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) || !defined(CONFIG_SMP)
                __kernel_getcpu;
 #endif
 
index 8eb867d..25c14a0 100644 (file)
@@ -67,6 +67,9 @@ _GLOBAL(load_up_altivec)
 #ifdef CONFIG_PPC32
        mfspr   r5,SPRN_SPRG_THREAD             /* current task's THREAD (phys) */
        oris    r9,r9,MSR_VEC@h
+#ifdef CONFIG_VMAP_STACK
+       tovirt(r5, r5)
+#endif
 #else
        ld      r4,PACACURRENT(r13)
        addi    r5,r4,THREAD            /* Get THREAD */
index 8834220..b4c89a1 100644 (file)
@@ -323,7 +323,7 @@ SECTIONS
 #endif
 
        /* The initial task and kernel stack */
-       INIT_TASK_DATA_SECTION(THREAD_SIZE)
+       INIT_TASK_DATA_SECTION(THREAD_ALIGN)
 
        .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
                PAGE_ALIGNED_DATA(PAGE_SIZE)
index 744dba9..803940d 100644 (file)
@@ -63,12 +63,10 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
        }
        isync();
 
-       pagefault_disable();
        if (is_load)
-               ret = raw_copy_from_user(to, from, n);
+               ret = probe_user_read(to, (const void __user *)from, n);
        else
-               ret = raw_copy_to_user(to, from, n);
-       pagefault_enable();
+               ret = probe_user_write((void __user *)to, from, n);
 
        /* switch the pid first to avoid running host with unallocated pid */
        if (quadrant == 1 && pid != old_pid)
index c6fbbd2..dbc2fec 100644 (file)
@@ -1801,6 +1801,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        tlbsync
        ptesync
 
+BEGIN_FTR_SECTION
        /* Radix: Handle the case where the guest used an illegal PID */
        LOAD_REG_ADDR(r4, mmu_base_pid)
        lwz     r3, VCPU_GUEST_PID(r9)
@@ -1830,6 +1831,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        addi    r7,r7,0x1000
        bdnz    1b
        ptesync
+END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
 
 2:
 #endif /* CONFIG_PPC_RADIX_MMU */
index 66858b7..85215e7 100644 (file)
@@ -484,7 +484,7 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
        kvmppc_xive_select_irq(state, &hw_num, &xd);
 
        /*
-        * See command in xive_lock_and_mask() concerning masking
+        * See comment in xive_lock_and_mask() concerning masking
         * via firmware.
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
index 8bbbd97..c11b0a0 100644 (file)
 #include <asm/feature-fixups.h>
 #include <asm/code-patching-asm.h>
 
+#ifdef CONFIG_VMAP_STACK
+#define ADDR_OFFSET    0
+#else
+#define ADDR_OFFSET    PAGE_OFFSET
+#endif
+
 #ifdef CONFIG_SMP
        .section .bss
        .align  2
@@ -47,8 +53,8 @@ mmu_hash_lock:
        .text
 _GLOBAL(hash_page)
 #ifdef CONFIG_SMP
-       lis     r8, (mmu_hash_lock - PAGE_OFFSET)@h
-       ori     r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
+       lis     r8, (mmu_hash_lock - ADDR_OFFSET)@h
+       ori     r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
        lis     r0,0x0fff
        b       10f
 11:    lwz     r6,0(r8)
@@ -66,9 +72,12 @@ _GLOBAL(hash_page)
        cmplw   0,r4,r0
        ori     r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
        mfspr   r5, SPRN_SPRG_PGDIR     /* phys page-table root */
+#ifdef CONFIG_VMAP_STACK
+       tovirt(r5, r5)
+#endif
        blt+    112f                    /* assume user more likely */
-       lis     r5, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
-       addi    r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
+       lis     r5, (swapper_pg_dir - ADDR_OFFSET)@ha   /* if kernel address, use */
+       addi    r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l        /* kernel page table */
        rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
@@ -80,6 +89,9 @@ _GLOBAL(hash_page)
        lwzx    r8,r8,r5                /* Get L1 entry */
        rlwinm. r8,r8,0,0,20            /* extract pt base address */
 #endif
+#ifdef CONFIG_VMAP_STACK
+       tovirt(r8, r8)
+#endif
 #ifdef CONFIG_SMP
        beq-    hash_page_out           /* return if no mapping */
 #else
@@ -137,9 +149,9 @@ retry:
 
 #ifdef CONFIG_SMP
        eieio
-       lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+       lis     r8, (mmu_hash_lock - ADDR_OFFSET)@ha
        li      r0,0
-       stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+       stw     r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
 #endif
 
        /* Return from the exception */
@@ -152,9 +164,9 @@ retry:
 #ifdef CONFIG_SMP
 hash_page_out:
        eieio
-       lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+       lis     r8, (mmu_hash_lock - ADDR_OFFSET)@ha
        li      r0,0
-       stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+       stw     r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
        blr
 #endif /* CONFIG_SMP */
 
@@ -329,7 +341,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        patch_site      1f, patch__hash_page_A1
        patch_site      2f, patch__hash_page_A2
        /* Get the address of the primary PTE group in the hash table (r3) */
-0:     lis     r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
+0:     lis     r0, (Hash_base - ADDR_OFFSET)@h /* base address of hash table */
 1:     rlwimi  r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
 2:     rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r3,r3,r0                /* make primary hash */
@@ -343,10 +355,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        beq+    10f                     /* no PTE: go look for an empty slot */
        tlbie   r4
 
-       lis     r4, (htab_hash_searches - PAGE_OFFSET)@ha
-       lwz     r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+       lis     r4, (htab_hash_searches - ADDR_OFFSET)@ha
+       lwz     r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
        addi    r6,r6,1                 /* count how many searches we do */
-       stw     r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+       stw     r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
 
        /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
        mtctr   r0
@@ -378,10 +390,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        beq+    found_empty
 
        /* update counter of times that the primary PTEG is full */
-       lis     r4, (primary_pteg_full - PAGE_OFFSET)@ha
-       lwz     r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+       lis     r4, (primary_pteg_full - ADDR_OFFSET)@ha
+       lwz     r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
        addi    r6,r6,1
-       stw     r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+       stw     r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
 
        patch_site      0f, patch__hash_page_C
        /* Search the secondary PTEG for an empty slot */
@@ -415,8 +427,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
         * lockup here but that shouldn't happen
         */
 
-1:     lis     r4, (next_slot - PAGE_OFFSET)@ha        /* get next evict slot */
-       lwz     r6, (next_slot - PAGE_OFFSET)@l(r4)
+1:     lis     r4, (next_slot - ADDR_OFFSET)@ha        /* get next evict slot */
+       lwz     r6, (next_slot - ADDR_OFFSET)@l(r4)
        addi    r6,r6,HPTE_SIZE                 /* search for candidate */
        andi.   r6,r6,7*HPTE_SIZE
        stw     r6,next_slot@l(r4)
index 69b2419..0a1c65a 100644 (file)
@@ -413,6 +413,7 @@ void __init MMU_init_hw(void)
 void __init MMU_init_hw_patch(void)
 {
        unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+       unsigned int hash;
 
        if (ppc_md.progress)
                ppc_md.progress("hash:patch", 0x345);
@@ -424,8 +425,12 @@ void __init MMU_init_hw_patch(void)
        /*
         * Patch up the instructions in hashtable.S:create_hpte
         */
-       modify_instruction_site(&patch__hash_page_A0, 0xffff,
-                               ((unsigned int)Hash - PAGE_OFFSET) >> 16);
+       if (IS_ENABLED(CONFIG_VMAP_STACK))
+               hash = (unsigned int)Hash;
+       else
+               hash = (unsigned int)Hash - PAGE_OFFSET;
+
+       modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
        modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
        modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
index b30435c..523d4d3 100644 (file)
@@ -652,6 +652,7 @@ static void init_hpte_page_sizes(void)
 
 static void __init htab_init_page_sizes(void)
 {
+       bool aligned = true;
        init_hpte_page_sizes();
 
        if (!debug_pagealloc_enabled()) {
@@ -659,7 +660,15 @@ static void __init htab_init_page_sizes(void)
                 * Pick a size for the linear mapping. Currently, we only
                 * support 16M, 1M and 4K which is the default
                 */
-               if (mmu_psize_defs[MMU_PAGE_16M].shift)
+               if (IS_ENABLED(STRICT_KERNEL_RWX) &&
+                   (unsigned long)_stext % 0x1000000) {
+                       if (mmu_psize_defs[MMU_PAGE_16M].shift)
+                               pr_warn("Kernel not 16M aligned, "
+                                       "disabling 16M linear map alignment");
+                       aligned = false;
+               }
+
+               if (mmu_psize_defs[MMU_PAGE_16M].shift && aligned)
                        mmu_linear_psize = MMU_PAGE_16M;
                else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                        mmu_linear_psize = MMU_PAGE_1M;
index 75483b4..2bf7e1b 100644 (file)
@@ -378,7 +378,6 @@ static inline void pgtable_free(void *table, int index)
        }
 }
 
-#ifdef CONFIG_SMP
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
        unsigned long pgf = (unsigned long)table;
@@ -395,12 +394,6 @@ void __tlb_remove_table(void *_table)
 
        return pgtable_free(table, index);
 }
-#else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
-{
-       return pgtable_free(table, index);
-}
-#endif
 
 #ifdef CONFIG_PROC_FS
 atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
index 974109b..dd1bea4 100644 (file)
@@ -337,7 +337,11 @@ static void __init radix_init_pgtable(void)
        }
 
        /* Find out how many PID bits are supported */
-       if (cpu_has_feature(CPU_FTR_HVMODE)) {
+       if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+               if (!mmu_pid_bits)
+                       mmu_pid_bits = 20;
+               mmu_base_pid = 1;
+       } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (!mmu_pid_bits)
                        mmu_pid_bits = 20;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
index a95175c..03f43c9 100644 (file)
@@ -1161,6 +1161,9 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;
 
+       if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+               return;
+
        /*
         * If this context hasn't run on that CPU before and KVM is
         * around, there's a slim chance that the guest on another
index b5047f9..8db0507 100644 (file)
@@ -233,7 +233,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
 
        // Read/write fault in a valid region (the exception table search passed
        // above), but blocked by KUAP is bad, it can never succeed.
-       if (bad_kuap_fault(regs, is_write))
+       if (bad_kuap_fault(regs, address, is_write))
                return true;
 
        // What's left? Kernel fault on user in well defined regions (extable
@@ -279,12 +279,8 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
                if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
                    access_ok(nip, sizeof(*nip))) {
                        unsigned int inst;
-                       int res;
 
-                       pagefault_disable();
-                       res = __get_user_inatomic(inst, nip);
-                       pagefault_enable();
-                       if (!res)
+                       if (!probe_user_read(&inst, nip, sizeof(inst)))
                                return !store_updates_sp(inst);
                        *must_retry = true;
                }
@@ -354,6 +350,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
         * Userspace trying to access kernel address, we get PROTFAULT for that.
         */
        if (is_user && address >= TASK_SIZE) {
+               if ((long)address == -1)
+                       return;
+
                pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
                                   current->comm, current->pid, address,
                                   from_kuid(&init_user_ns, current_uid()));
index 0e6ed44..16dd95b 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
-static pgprot_t kasan_prot_ro(void)
+static pgprot_t __init kasan_prot_ro(void)
 {
        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return PAGE_READONLY;
@@ -20,7 +20,7 @@ static pgprot_t kasan_prot_ro(void)
        return PAGE_KERNEL_RO;
 }
 
-static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
        unsigned long va = (unsigned long)kasan_early_shadow_page;
        phys_addr_t pa = __pa(kasan_early_shadow_page);
@@ -30,29 +30,25 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
                __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
 }
 
-static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
 {
        pmd_t *pmd;
        unsigned long k_cur, k_next;
-       pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
+       pte_t *new = NULL;
 
        pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
        for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
-               pte_t *new;
-
                k_next = pgd_addr_end(k_cur, k_end);
                if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
                        continue;
 
-               if (slab_is_available())
-                       new = pte_alloc_one_kernel(&init_mm);
-               else
+               if (!new)
                        new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
 
                if (!new)
                        return -ENOMEM;
-               kasan_populate_pte(new, prot);
+               kasan_populate_pte(new, PAGE_KERNEL);
 
                smp_wmb(); /* See comment in __pte_alloc */
 
@@ -63,39 +59,27 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
                        new = NULL;
                }
                spin_unlock(&init_mm.page_table_lock);
-
-               if (new && slab_is_available())
-                       pte_free_kernel(&init_mm, new);
        }
        return 0;
 }
 
-static void __ref *kasan_get_one_page(void)
-{
-       if (slab_is_available())
-               return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-       return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-}
-
-static int __ref kasan_init_region(void *start, size_t size)
+static int __init kasan_init_region(void *start, size_t size)
 {
        unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
        unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
        unsigned long k_cur;
        int ret;
-       void *block = NULL;
+       void *block;
 
        ret = kasan_init_shadow_page_tables(k_start, k_end);
        if (ret)
                return ret;
 
-       if (!slab_is_available())
-               block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+       block = memblock_alloc(k_end - k_start, PAGE_SIZE);
 
        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
-               void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+               void *va = block + k_cur - k_start;
                pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
                if (!va)
@@ -129,6 +113,31 @@ static void __init kasan_remap_early_shadow_ro(void)
        flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
 }
 
+static void __init kasan_unmap_early_shadow_vmalloc(void)
+{
+       unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
+       unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
+       unsigned long k_cur;
+       phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+       if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+               int ret = kasan_init_shadow_page_tables(k_start, k_end);
+
+               if (ret)
+                       panic("kasan: kasan_init_shadow_page_tables() failed");
+       }
+       for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
+               pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+               pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+               if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+                       continue;
+
+               __set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
+       }
+       flush_tlb_kernel_range(k_start, k_end);
+}
+
 void __init kasan_mmu_init(void)
 {
        int ret;
@@ -165,34 +174,22 @@ void __init kasan_init(void)
        pr_info("KASAN init done\n");
 }
 
-#ifdef CONFIG_MODULES
-void *module_alloc(unsigned long size)
+void __init kasan_late_init(void)
 {
-       void *base;
-
-       base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
-                                   GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-                                   NUMA_NO_NODE, __builtin_return_address(0));
-
-       if (!base)
-               return NULL;
-
-       if (!kasan_init_region(base, size))
-               return base;
-
-       vfree(base);
-
-       return NULL;
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+               kasan_unmap_early_shadow_vmalloc();
 }
-#endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
 u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
 
 static void __init kasan_early_hash_table(void)
 {
-       modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
-       modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+       unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
+                                                           __pa(early_hash);
+
+       modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
+       modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
 
        Hash = (struct hash_pte *)early_hash;
 }
index f5535ea..ef7b111 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/fixmap.h>
 #include <asm/swiotlb.h>
 #include <asm/rtas.h>
+#include <asm/kasan.h>
 
 #include <mm/mmu_decl.h>
 
@@ -301,6 +302,9 @@ void __init mem_init(void)
 
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        set_max_mapnr(max_pfn);
+
+       kasan_late_init();
+
        memblock_free_all();
 
 #ifdef CONFIG_HIGHMEM
index 8e99649..7097e07 100644 (file)
@@ -181,3 +181,9 @@ void mmu_mark_rodata_ro(void);
 static inline void mmu_mark_initmem_nx(void) { }
 static inline void mmu_mark_rodata_ro(void) { }
 #endif
+
+#ifdef CONFIG_PPC_DEBUG_WX
+void ptdump_check_wx(void);
+#else
+static inline void ptdump_check_wx(void) { }
+#endif
index 96eb8e4..3189308 100644 (file)
@@ -21,33 +21,34 @@ extern int __map_without_ltlbs;
 static unsigned long block_mapped_ram;
 
 /*
- * Return PA for this VA if it is in an area mapped with LTLBs.
+ * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
  * Otherwise, returns 0
  */
 phys_addr_t v_block_mapped(unsigned long va)
 {
        unsigned long p = PHYS_IMMR_BASE;
 
-       if (__map_without_ltlbs)
-               return 0;
        if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
                return p + va - VIRT_IMMR_BASE;
+       if (__map_without_ltlbs)
+               return 0;
        if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
                return __pa(va);
        return 0;
 }
 
 /*
- * Return VA for a given PA mapped with LTLBs or 0 if not mapped
+ * Return VA for a given PA mapped with LTLBs or fixmap
+ * Return 0 if not mapped
  */
 unsigned long p_block_mapped(phys_addr_t pa)
 {
        unsigned long p = PHYS_IMMR_BASE;
 
-       if (__map_without_ltlbs)
-               return 0;
        if (pa >= p && pa < p + IMMR_SIZE)
                return VIRT_IMMR_BASE + pa - p;
+       if (__map_without_ltlbs)
+               return 0;
        if (pa < block_mapped_ram)
                return (unsigned long)__va(pa);
        return 0;
index 50d68d2..3c7dec7 100644 (file)
@@ -1616,11 +1616,11 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations topology_ops = {
-       .read = seq_read,
-       .write = topology_write,
-       .open = topology_open,
-       .release = single_release
+static const struct proc_ops topology_proc_ops = {
+       .proc_read      = seq_read,
+       .proc_write     = topology_write,
+       .proc_open      = topology_open,
+       .proc_release   = single_release,
 };
 
 static int topology_update_init(void)
@@ -1630,7 +1630,7 @@ static int topology_update_init(void)
        if (vphn_enabled)
                topology_schedule_update();
 
-       if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
+       if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_proc_ops))
                return -ENOMEM;
 
        topology_inited = 1;
index 73b8416..5fb90ed 100644 (file)
@@ -218,6 +218,7 @@ void mark_rodata_ro(void)
 
        if (v_block_mapped((unsigned long)_sinittext)) {
                mmu_mark_rodata_ro();
+               ptdump_check_wx();
                return;
        }
 
index 2f9ddc2..2061562 100644 (file)
@@ -24,6 +24,8 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 
+#include <mm/mmu_decl.h>
+
 #include "ptdump.h"
 
 /*
@@ -173,10 +175,12 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
 
 static void note_prot_wx(struct pg_state *st, unsigned long addr)
 {
+       pte_t pte = __pte(st->current_flags);
+
        if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
                return;
 
-       if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
+       if (!pte_write(pte) || !pte_exec(pte))
                return;
 
        WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
index 6ffcb80..6f347fa 100644 (file)
@@ -28,15 +28,12 @@ static unsigned int user_getsp32(unsigned int sp, int is_first)
        unsigned int stack_frame[2];
        void __user *p = compat_ptr(sp);
 
-       if (!access_ok(p, sizeof(stack_frame)))
-               return 0;
-
        /*
         * The most likely reason for this is that we returned -EFAULT,
         * which means that we've done all that we can do from
         * interrupt context.
         */
-       if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
+       if (probe_user_read(stack_frame, (void __user *)p, sizeof(stack_frame)))
                return 0;
 
        if (!is_first)
@@ -54,11 +51,7 @@ static unsigned long user_getsp64(unsigned long sp, int is_first)
 {
        unsigned long stack_frame[3];
 
-       if (!access_ok((void __user *)sp, sizeof(stack_frame)))
-               return 0;
-
-       if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
-                                       sizeof(stack_frame)))
+       if (probe_user_read(stack_frame, (void __user *)sp, sizeof(stack_frame)))
                return 0;
 
        if (!is_first)
@@ -103,7 +96,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
                        first_frame = 0;
                }
        } else {
-               pagefault_disable();
 #ifdef CONFIG_PPC64
                if (!is_32bit_task()) {
                        while (depth--) {
@@ -112,7 +104,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
                                        break;
                                first_frame = 0;
                        }
-                       pagefault_enable();
                        return;
                }
 #endif
@@ -123,6 +114,5 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
                                break;
                        first_frame = 0;
                }
-               pagefault_enable();
        }
 }
index 19124b0..1ad03c5 100644 (file)
@@ -157,10 +157,6 @@ static void mpc8xx_pmu_read(struct perf_event *event)
 
 static void mpc8xx_pmu_del(struct perf_event *event, int flags)
 {
-       /* mfspr r10, SPRN_SPRG_SCRATCH0 */
-       unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
-                           __PPC_SPR(SPRN_SPRG_SCRATCH0);
-
        mpc8xx_pmu_read(event);
 
        /* If it was the last user, stop counting to avoid useless overhead */
@@ -173,6 +169,10 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
                break;
        case PERF_8xx_ID_ITLB_LOAD_MISS:
                if (atomic_dec_return(&itlb_miss_ref) == 0) {
+                       /* mfspr r10, SPRN_SPRG_SCRATCH0 */
+                       unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
+                                           __PPC_SPR(SPRN_SPRG_SCRATCH0);
+
                        patch_instruction_site(&patch__itlbmiss_exit_1, insn);
 #ifndef CONFIG_PIN_TLB_TEXT
                        patch_instruction_site(&patch__itlbmiss_exit_2, insn);
@@ -181,6 +181,10 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
                break;
        case PERF_8xx_ID_DTLB_LOAD_MISS:
                if (atomic_dec_return(&dtlb_miss_ref) == 0) {
+                       /* mfspr r10, SPRN_DAR */
+                       unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
+                                           __PPC_SPR(SPRN_DAR);
+
                        patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
                        patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
                        patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
index 35d5425..cbc2519 100644 (file)
@@ -155,12 +155,8 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
            ((unsigned long)ptr & 7))
                return -EFAULT;
 
-       pagefault_disable();
-       if (!__get_user_inatomic(*ret, ptr)) {
-               pagefault_enable();
+       if (!probe_user_read(ret, ptr, sizeof(*ret)))
                return 0;
-       }
-       pagefault_enable();
 
        return read_user_stack_slow(ptr, ret, 8);
 }
@@ -171,12 +167,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
            ((unsigned long)ptr & 3))
                return -EFAULT;
 
-       pagefault_disable();
-       if (!__get_user_inatomic(*ret, ptr)) {
-               pagefault_enable();
+       if (!probe_user_read(ret, ptr, sizeof(*ret)))
                return 0;
-       }
-       pagefault_enable();
 
        return read_user_stack_slow(ptr, ret, 4);
 }
@@ -293,17 +285,11 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
  */
 static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 {
-       int rc;
-
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;
 
-       pagefault_disable();
-       rc = __get_user_inatomic(*ret, ptr);
-       pagefault_enable();
-
-       return rc;
+       return probe_user_read(ret, ptr, sizeof(*ret));
 }
 
 static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
index 4860462..3086055 100644 (file)
@@ -415,7 +415,6 @@ static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
 static __u64 power_pmu_bhrb_to(u64 addr)
 {
        unsigned int instr;
-       int ret;
        __u64 target;
 
        if (is_kernel_addr(addr)) {
@@ -426,13 +425,8 @@ static __u64 power_pmu_bhrb_to(u64 addr)
        }
 
        /* Userspace: need copy instruction here then translate it */
-       pagefault_disable();
-       ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
-       if (ret) {
-               pagefault_enable();
+       if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr)))
                return 0;
-       }
-       pagefault_enable();
 
        target = branch_target(&instr);
        if ((!target) || (instr & BRANCH_ABSOLUTE))
index 13631f3..04bf6ec 100644 (file)
@@ -434,9 +434,9 @@ static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
        memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
        spin_lock_init(&lpbfifo.lock);
 
-       lpbfifo.chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
-       if (lpbfifo.chan == NULL)
-               return -EPROBE_DEFER;
+       lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx");
+       if (IS_ERR(lpbfifo.chan))
+               return PTR_ERR(lpbfifo.chan);
 
        if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
                dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
index 273145a..b0d5471 100644 (file)
@@ -64,7 +64,7 @@ static void quirk_mpc8360e_qe_enet10(void)
                return;
        }
 
-       base = ioremap(res.start, res.end - res.start + 1);
+       base = ioremap(res.start, resource_size(&res));
 
        /*
         * set output delay adjustments to default values according
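
resource_size() hides the inclusive-end arithmetic that the open-coded form was duplicating; for reference, the helper in include/linux/ioport.h is essentially:

        static inline resource_size_t resource_size(const struct resource *res)
        {
                return res->end - res->start + 1;
        }
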
index 8c7ea24..48f7d96 100644 (file)
@@ -252,6 +252,15 @@ static int smp_85xx_start_cpu(int cpu)
        out_be64((u64 *)(&spin_table->addr_h),
                __pa(ppc_function_entry(generic_secondary_smp_init)));
 #else
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       /*
+        * We also need to write addr_h to the spin table for systems
+        * whose physical memory start address is configured above 4G;
+        * otherwise the secondary core cannot find the correct entry
+        * point to start from.
+        */
+       out_be32(&spin_table->addr_h, __pa(__early_start) >> 32);
+#endif
        out_be32(&spin_table->addr_l, __pa(__early_start));
 #endif
        flush_spin_table(spin_table);
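
When physical memory (and hence __early_start) sits above 4G, __pa(__early_start) no longer fits in 32 bits, so both halves of the entry address must be published in the spin table. The explicit ">> 32" in the hunk is equivalent to the upper_32_bits()/lower_32_bits() helpers (a sketch, not the committed form):

        phys_addr_t entry = __pa(__early_start);

        out_be32(&spin_table->addr_h, upper_32_bits(entry));
        out_be32(&spin_table->addr_l, lower_32_bits(entry));
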
index 6c3c0cd..b301ef9 100644 (file)
@@ -60,10 +60,6 @@ static void __init twr_p1025_pic_init(void)
  */
 static void __init twr_p1025_setup_arch(void)
 {
-#ifdef CONFIG_QUICC_ENGINE
-       struct device_node *np;
-#endif
-
        if (ppc_md.progress)
                ppc_md.progress("twr_p1025_setup_arch()", 0);
 
@@ -77,6 +73,7 @@ static void __init twr_p1025_setup_arch(void)
 #if IS_ENABLED(CONFIG_UCC_GETH) || IS_ENABLED(CONFIG_SERIAL_QE)
        if (machine_is(twr_p1025)) {
                struct ccsr_guts __iomem *guts;
+               struct device_node *np;
 
                np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts");
                if (np) {
index e28df29..1f80253 100644 (file)
@@ -177,6 +177,10 @@ config PPC_970_NAP
 config PPC_P7_NAP
        bool
 
+config PPC_BOOK3S_IDLE
+       def_bool y
+       depends on (PPC_970_NAP || PPC_P7_NAP)
+
 config PPC_INDIRECT_PIO
        bool
        select GENERIC_IOMAP
index 8d7f9c3..6caedc8 100644 (file)
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
+config PPC32
+       bool
+       default y if !PPC64
+       select KASAN_VMALLOC if KASAN && MODULES
+
 config PPC64
        bool "64-bit kernel"
        select ZLIB_DEFLATE
@@ -31,12 +36,14 @@ config PPC_BOOK3S_6xx
        select PPC_HAVE_PMU_SUPPORT
        select PPC_HAVE_KUEP
        select PPC_HAVE_KUAP
+       select HAVE_ARCH_VMAP_STACK
 
 config PPC_BOOK3S_601
        bool "PowerPC 601"
        select PPC_BOOK3S_32
        select PPC_FPU
        select PPC_HAVE_KUAP
+       select HAVE_ARCH_VMAP_STACK
 
 config PPC_85xx
        bool "Freescale 85xx"
@@ -49,6 +56,7 @@ config PPC_8xx
        select PPC_HAVE_KUEP
        select PPC_HAVE_KUAP
        select PPC_MM_SLICES if HUGETLB_PAGE
+       select HAVE_ARCH_VMAP_STACK
 
 config 40x
        bool "AMCC 40x"
index 47f7310..6f019df 100644 (file)
@@ -229,7 +229,7 @@ static void __init maple_init_IRQ(void)
        root = of_find_node_by_path("/");
        naddr = of_n_addr_cells(root);
        opprop = of_get_property(root, "platform-open-pic", &opplen);
-       if (opprop != 0) {
+       if (opprop) {
                openpic_addr = of_read_number(opprop, naddr);
                has_isus = (opplen > naddr);
                printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
index a6ee080..2b3dfd0 100644 (file)
@@ -790,48 +790,81 @@ static int opal_sysfs_init(void)
        return 0;
 }
 
-static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
-                              struct bin_attribute *bin_attr,
-                              char *buf, loff_t off, size_t count)
+static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
+                               struct bin_attribute *bin_attr, char *buf,
+                               loff_t off, size_t count)
 {
        return memory_read_from_buffer(buf, count, &off, bin_attr->private,
                                       bin_attr->size);
 }
 
-static struct bin_attribute symbol_map_attr = {
-       .attr = {.name = "symbol_map", .mode = 0400},
-       .read = symbol_map_read
-};
-
-static void opal_export_symmap(void)
+static int opal_add_one_export(struct kobject *parent, const char *export_name,
+                              struct device_node *np, const char *prop_name)
 {
-       const __be64 *syms;
-       unsigned int size;
-       struct device_node *fw;
+       struct bin_attribute *attr = NULL;
+       const char *name = NULL;
+       u64 vals[2];
        int rc;
 
-       fw = of_find_node_by_path("/ibm,opal/firmware");
-       if (!fw)
-               return;
-       syms = of_get_property(fw, "symbol-map", &size);
-       if (!syms || size != 2 * sizeof(__be64))
-               return;
+       rc = of_property_read_u64_array(np, prop_name, &vals[0], 2);
+       if (rc)
+               goto out;
 
-       /* Setup attributes */
-       symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
-       symbol_map_attr.size = be64_to_cpu(syms[1]);
+       attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+       name = kstrdup(export_name, GFP_KERNEL);
+       if (!name) {
+               rc = -ENOMEM;
+               goto out;
+       }
 
-       rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
-       if (rc)
-               pr_warn("Error %d creating OPAL symbols file\n", rc);
+       sysfs_bin_attr_init(attr);
+       attr->attr.name = name;
+       attr->attr.mode = 0400;
+       attr->read = export_attr_read;
+       attr->private = __va(vals[0]);
+       attr->size = vals[1];
+
+       rc = sysfs_create_bin_file(parent, attr);
+out:
+       if (rc) {
+               kfree(name);
+               kfree(attr);
+       }
+
+       return rc;
 }
 
-static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
-                               struct bin_attribute *bin_attr, char *buf,
-                               loff_t off, size_t count)
+static void opal_add_exported_attrs(struct device_node *np,
+                                   struct kobject *kobj)
 {
-       return memory_read_from_buffer(buf, count, &off, bin_attr->private,
-                                      bin_attr->size);
+       struct device_node *child;
+       struct property *prop;
+
+       for_each_property_of_node(np, prop) {
+               int rc;
+
+               if (!strcmp(prop->name, "name") ||
+                   !strcmp(prop->name, "phandle"))
+                       continue;
+
+               rc = opal_add_one_export(kobj, prop->name, np, prop->name);
+               if (rc) {
+                       pr_warn("Unable to add export %pOF/%s, rc = %d!\n",
+                               np, prop->name, rc);
+               }
+       }
+
+       for_each_child_of_node(np, child) {
+               struct kobject *child_kobj;
+
+               child_kobj = kobject_create_and_add(child->name, kobj);
+               if (!child_kobj) {
+                       pr_err("Unable to create export dir for %pOF\n", child);
+                       continue;
+               }
+
+               opal_add_exported_attrs(child, child_kobj);
+       }
 }
 
 /*
@@ -843,11 +876,8 @@ static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
  */
 static void opal_export_attrs(void)
 {
-       struct bin_attribute *attr;
        struct device_node *np;
-       struct property *prop;
        struct kobject *kobj;
-       u64 vals[2];
        int rc;
 
        np = of_find_node_by_path("/ibm,opal/firmware/exports");
@@ -861,41 +891,16 @@ static void opal_export_attrs(void)
                return;
        }
 
-       for_each_property_of_node(np, prop) {
-               if (!strcmp(prop->name, "name") || !strcmp(prop->name, "phandle"))
-                       continue;
-
-               if (of_property_read_u64_array(np, prop->name, &vals[0], 2))
-                       continue;
-
-               attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+       opal_add_exported_attrs(np, kobj);
 
-               if (attr == NULL) {
-                       pr_warn("Failed kmalloc for bin_attribute!");
-                       continue;
-               }
-
-               sysfs_bin_attr_init(attr);
-               attr->attr.name = kstrdup(prop->name, GFP_KERNEL);
-               attr->attr.mode = 0400;
-               attr->read = export_attr_read;
-               attr->private = __va(vals[0]);
-               attr->size = vals[1];
-
-               if (attr->attr.name == NULL) {
-                       pr_warn("Failed kstrdup for bin_attribute attr.name");
-                       kfree(attr);
-                       continue;
-               }
-
-               rc = sysfs_create_bin_file(kobj, attr);
-               if (rc) {
-                       pr_warn("Error %d creating OPAL sysfs exports/%s file\n",
-                                rc, prop->name);
-                       kfree(attr->attr.name);
-                       kfree(attr);
-               }
-       }
+       /*
+        * NB: symbol_map existed before the generic export interface so it
+        * lives under the top level opal_kobj.
+        */
+       rc = opal_add_one_export(opal_kobj, "symbol_map",
+                                np->parent, "symbol-map");
+       if (rc)
+               pr_warn("Error %d creating OPAL symbols file\n", rc);
 
        of_node_put(np);
 }
@@ -1042,8 +1047,6 @@ static int __init opal_init(void)
        /* Create "opal" kobject under /sys/firmware */
        rc = opal_sysfs_init();
        if (rc == 0) {
-               /* Export symbol map to userspace */
-               opal_export_symmap();
                /* Setup dump region interface */
                opal_dump_region_init();
                /* Setup error log interface */
@@ -1056,11 +1059,10 @@ static int __init opal_init(void)
                opal_sys_param_init();
                /* Setup message log sysfs interface. */
                opal_msglog_sysfs_init();
+               /* Add all export properties */
+               opal_export_attrs();
        }
 
-       /* Export all properties */
-       opal_export_attrs();
-
        /* Initialize platform devices: IPMI backend, PRD & flash interface */
        opal_pdev_init("ibm,opal-ipmi");
        opal_pdev_init("ibm,opal-flash");
index da1068a..22c22cd 100644 (file)
@@ -188,7 +188,7 @@ static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
        unsigned int pe_num = pe->pe_number;
 
        WARN_ON(pe->pdev);
-       WARN_ON(pe->npucomp); /* NPUs are not supposed to be freed */
+       WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */
        kfree(pe->npucomp);
        memset(pe, 0, sizeof(struct pnv_ioda_pe));
        clear_bit(pe_num, phb->ioda.pe_alloc);
@@ -777,6 +777,34 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
        return 0;
 }
 
+static void pnv_ioda_unset_peltv(struct pnv_phb *phb,
+                                struct pnv_ioda_pe *pe,
+                                struct pci_dev *parent)
+{
+       int64_t rc;
+
+       while (parent) {
+               struct pci_dn *pdn = pci_get_pdn(parent);
+
+               if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+                       rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
+                                               pe->pe_number,
+                                               OPAL_REMOVE_PE_FROM_DOMAIN);
+                       /* XXX What to do in case of error ? */
+               }
+               parent = parent->bus->self;
+       }
+
+       opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
+                                 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+       /* Disassociate PE in PELT */
+       rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
+                               pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+       if (rc)
+               pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
+}
+
 static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 {
        struct pci_dev *parent;
@@ -792,7 +820,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
                fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
                parent = pe->pbus->self;
                if (pe->flags & PNV_IODA_PE_BUS_ALL)
-                       count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+                       count = resource_size(&pe->pbus->busn_res);
                else
                        count = 1;
 
@@ -827,25 +855,13 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
        for (rid = pe->rid; rid < rid_end; rid++)
                phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;
 
-       /* Release from all parents PELT-V */
-       while (parent) {
-               struct pci_dn *pdn = pci_get_pdn(parent);
-               if (pdn && pdn->pe_number != IODA_INVALID_PE) {
-                       rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
-                                               pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
-                       /* XXX What to do in case of error ? */
-               }
-               parent = parent->bus->self;
-       }
-
-       opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
-                                 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+       /*
+        * Release the PE from all parents' PELT-V. NPUs don't have a
+        * PELT-V table.
+        */
+       if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+               pnv_ioda_unset_peltv(phb, pe, parent);
 
-       /* Disassociate PE in PELT */
-       rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
-                               pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
-       if (rc)
-               pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
        rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
                             bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
        if (rc)
@@ -874,7 +890,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
                fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
                parent = pe->pbus->self;
                if (pe->flags & PNV_IODA_PE_BUS_ALL)
-                       count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+                       count = resource_size(&pe->pbus->busn_res);
                else
                        count = 1;
 
@@ -1062,20 +1078,20 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
                return NULL;
        }
 
-       /* NOTE: We get only one ref to the pci_dev for the pdn, not for the
-        * pointer in the PE data structure, both should be destroyed at the
-        * same time. However, this needs to be looked at more closely again
-        * once we actually start removing things (Hotplug, SR-IOV, ...)
+       /* NOTE: We don't take a reference for the pointer in the PE
+        * data structure; the device and PE structures should be
+        * destroyed at the same time. However, removing nvlink
+        * devices will need some work.
         *
         * At some point we want to remove the PDN completely anyways
         */
-       pci_dev_get(dev);
        pdn->pe_number = pe->pe_number;
        pe->flags = PNV_IODA_PE_DEV;
        pe->pdev = dev;
        pe->pbus = NULL;
        pe->mve_number = -1;
        pe->rid = dev->bus->number << 8 | pdn->devfn;
+       pe->device_count++;
 
        pe_info(pe, "Associated device to PE\n");
 
@@ -1084,13 +1100,13 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
                pnv_ioda_free_pe(pe);
                pdn->pe_number = IODA_INVALID_PE;
                pe->pdev = NULL;
-               pci_dev_put(dev);
                return NULL;
        }
 
        /* Put PE to the list */
+       mutex_lock(&phb->ioda.pe_list_mutex);
        list_add_tail(&pe->list, &phb->ioda.pe_list);
-
+       mutex_unlock(&phb->ioda.pe_list_mutex);
        return pe;
 }
 
@@ -1205,6 +1221,14 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
        struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
        struct pnv_phb *phb = hose->private_data;
 
+       /*
+        * Intentionally leak a reference on the npu device (for
+        * nvlink only; this is not an opencapi path) to make sure it
+        * never goes away, as it's been the case all along and some
+        * work is needed otherwise.
+        */
+       pci_dev_get(npu_pdev);
+
        /*
         * Due to a hardware errata PE#0 on the NPU is reserved for
         * error handling. This means we only have three PEs remaining
@@ -1228,11 +1252,11 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
                         */
                        dev_info(&npu_pdev->dev,
                                "Associating to existing PE %x\n", pe_num);
-                       pci_dev_get(npu_pdev);
                        npu_pdn = pci_get_pdn(npu_pdev);
                        rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
                        npu_pdn->pe_number = pe_num;
                        phb->ioda.pe_rmap[rid] = pe->pe_number;
+                       pe->device_count++;
 
                        /* Map the PE to this link */
                        rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
@@ -1268,8 +1292,6 @@ static void pnv_pci_ioda_setup_PEs(void)
 {
        struct pci_controller *hose;
        struct pnv_phb *phb;
-       struct pci_bus *bus;
-       struct pci_dev *pdev;
        struct pnv_ioda_pe *pe;
 
        list_for_each_entry(hose, &hose_list, list_node) {
@@ -1281,11 +1303,6 @@ static void pnv_pci_ioda_setup_PEs(void)
                        if (phb->model == PNV_PHB_MODEL_NPU2)
                                WARN_ON_ONCE(pnv_npu2_init(hose));
                }
-               if (phb->type == PNV_PHB_NPU_OCAPI) {
-                       bus = hose->bus;
-                       list_for_each_entry(pdev, &bus->devices, bus_list)
-                               pnv_ioda_setup_dev_PE(pdev);
-               }
        }
        list_for_each_entry(hose, &hose_list, list_node) {
                phb = hose->private_data;
@@ -1558,6 +1575,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 
        /* Reserve PE for each VF */
        for (vf_index = 0; vf_index < num_vfs; vf_index++) {
+               int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
+               int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
+               struct pci_dn *vf_pdn;
+
                if (pdn->m64_single_mode)
                        pe_num = pdn->pe_num_map[vf_index];
                else
@@ -1570,13 +1591,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
                pe->pbus = NULL;
                pe->parent_dev = pdev;
                pe->mve_number = -1;
-               pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
-                          pci_iov_virtfn_devfn(pdev, vf_index);
+               pe->rid = (vf_bus << 8) | vf_devfn;
 
                pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
                        hose->global_number, pdev->bus->number,
-                       PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
-                       PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
+                       PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
 
                if (pnv_ioda_configure_pe(phb, pe)) {
                        /* XXX What do we do here ? */
@@ -1590,6 +1609,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
                list_add_tail(&pe->list, &phb->ioda.pe_list);
                mutex_unlock(&phb->ioda.pe_list_mutex);
 
+               /* associate this PE with its pdn */
+               list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
+                       if (vf_pdn->busno == vf_bus &&
+                           vf_pdn->devfn == vf_devfn) {
+                               vf_pdn->pe_number = pe_num;
+                               break;
+                       }
+               }
+
                pnv_pci_ioda2_setup_dma_pe(phb, pe);
 #ifdef CONFIG_IOMMU_API
                iommu_register_group(&pe->table_group,
@@ -1719,21 +1747,23 @@ int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
        pnv_pci_sriov_disable(pdev);
 
        /* Release PCI data */
-       remove_dev_pci_data(pdev);
+       remove_sriov_vf_pdns(pdev);
        return 0;
 }
 
 int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
 {
        /* Allocate PCI data */
-       add_dev_pci_data(pdev);
+       add_sriov_vf_pdns(pdev);
 
        return pnv_pci_sriov_enable(pdev, num_vfs);
 }
 #endif /* CONFIG_PCI_IOV */
 
-static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
+static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
 {
+       struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+       struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;
 
@@ -2889,9 +2919,6 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
        struct pci_dn *pdn;
        int mul, total_vfs;
 
-       if (!pdev->is_physfn || pci_dev_is_added(pdev))
-               return;
-
        pdn = pci_get_pdn(pdev);
        pdn->vfs_expanded = 0;
        pdn->m64_single_mode = false;
@@ -2966,6 +2993,30 @@ truncate_iov:
                res->end = res->start - 1;
        }
 }
+
+static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
+{
+       if (WARN_ON(pci_dev_is_added(pdev)))
+               return;
+
+       if (pdev->is_virtfn) {
+               struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
+
+               /*
+                * VF PEs are single-device PEs so their pdev pointer needs to
+                * be set. The pdev doesn't exist when the PE is allocated (in
+                * (pcibios_sriov_enable()) so we fix it up here.
+                */
+               pe->pdev = pdev;
+               WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
+       } else if (pdev->is_physfn) {
+               /*
+                * For PFs, adjust the allocated IOV resources to match what
+                * the PHB can support using its M64 BAR table.
+                */
+               pnv_pci_ioda_fixup_iov_resources(pdev);
+       }
+}
 #endif /* CONFIG_PCI_IOV */
 
 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
@@ -3062,19 +3113,9 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
 #ifdef CONFIG_DEBUG_FS
 static int pnv_pci_diag_data_set(void *data, u64 val)
 {
-       struct pci_controller *hose;
-       struct pnv_phb *phb;
+       struct pnv_phb *phb = data;
        s64 ret;
 
-       if (val != 1ULL)
-               return -EINVAL;
-
-       hose = (struct pci_controller *)data;
-       if (!hose || !hose->private_data)
-               return -ENODEV;
-
-       phb = hose->private_data;
-
        /* Retrieve the diag data from firmware */
        ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
                                          phb->diag_data_size);
@@ -3089,6 +3130,33 @@ static int pnv_pci_diag_data_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set,
                         "%llu\n");
 
+static int pnv_pci_ioda_pe_dump(void *data, u64 val)
+{
+       struct pnv_phb *phb = data;
+       int pe_num;
+
+       for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
+               struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num];
+
+               if (!test_bit(pe_num, phb->ioda.pe_alloc))
+                       continue;
+
+               pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n",
+                       pe->rid, pe->device_count,
+                       (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "",
+                       (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "",
+                       (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "",
+                       (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "",
+                       (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "",
+                       (pe->flags & PNV_IODA_PE_VF) ? "vf " : "");
+       }
+
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL,
+                        pnv_pci_ioda_pe_dump, "%llu\n");
+
 #endif /* CONFIG_DEBUG_FS */
 
 static void pnv_pci_ioda_create_dbgfs(void)
@@ -3113,7 +3181,9 @@ static void pnv_pci_ioda_create_dbgfs(void)
                }
 
                debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
-                                          hose, &pnv_pci_diag_data_fops);
+                                          phb, &pnv_pci_diag_data_fops);
+               debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs,
+                                          phb, &pnv_pci_ioda_pe_dump_fops);
        }
 #endif /* CONFIG_DEBUG_FS */
 }
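
DEFINE_DEBUGFS_ATTRIBUTE() pairs with debugfs_create_file_unsafe(), and the private pointer handed to debugfs_create_file_unsafe() comes back as the data argument of the handler; passing the pnv_phb directly is what lets the handlers above drop the hose-to-phb lookup and validation. The shape of the pattern, with hypothetical names:

        static int example_set(void *data, u64 val)
        {
                struct pnv_phb *phb = data;     /* pointer passed at creation */

                /* act on phb here */
                return 0;
        }
        DEFINE_DEBUGFS_ATTRIBUTE(example_fops, NULL, example_set, "%llu\n");

        /* in the create path: */
        debugfs_create_file_unsafe("example", 0200, phb->dbgfs, phb, &example_fops);
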
@@ -3383,6 +3453,28 @@ static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
        return true;
 }
 
+static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
+{
+       struct pci_controller *hose = pci_bus_to_host(dev->bus);
+       struct pnv_phb *phb = hose->private_data;
+       struct pci_dn *pdn;
+       struct pnv_ioda_pe *pe;
+
+       if (!phb->initialized)
+               return true;
+
+       pdn = pci_get_pdn(dev);
+       if (!pdn)
+               return false;
+
+       if (pdn->pe_number == IODA_INVALID_PE) {
+               pe = pnv_ioda_setup_dev_PE(dev);
+               if (!pe)
+                       return false;
+       }
+       return true;
+}
+
 static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
                                       int num)
 {
@@ -3512,7 +3604,10 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
        struct pnv_phb *phb = pe->phb;
        struct pnv_ioda_pe *slave, *tmp;
 
+       mutex_lock(&phb->ioda.pe_list_mutex);
        list_del(&pe->list);
+       mutex_unlock(&phb->ioda.pe_list_mutex);
+
        switch (phb->type) {
        case PNV_PHB_IODA1:
                pnv_pci_ioda1_release_pe_dma(pe);
@@ -3520,6 +3615,8 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
        case PNV_PHB_IODA2:
                pnv_pci_ioda2_release_pe_dma(pe);
                break;
+       case PNV_PHB_NPU_OCAPI:
+               break;
        default:
                WARN_ON(1);
        }
@@ -3594,9 +3691,29 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
                       OPAL_ASSERT_RESET);
 }
 
+static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus)
+{
+       struct pci_controller *hose = bus->sysdata;
+       struct pnv_phb *phb = hose->private_data;
+       struct pnv_ioda_pe *pe;
+
+       list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+               if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+                       continue;
+
+               if (!pe->pbus)
+                       continue;
+
+               if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+                       pe->pbus = bus;
+                       break;
+               }
+       }
+}
+
 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
-       .dma_dev_setup          = pnv_pci_dma_dev_setup,
-       .dma_bus_setup          = pnv_pci_dma_bus_setup,
+       .dma_dev_setup          = pnv_pci_ioda_dma_dev_setup,
+       .dma_bus_setup          = pnv_pci_ioda_dma_bus_setup,
        .iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported,
        .setup_msi_irqs         = pnv_setup_msi_irqs,
        .teardown_msi_irqs      = pnv_teardown_msi_irqs,
@@ -3609,7 +3726,6 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
 };
 
 static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
-       .dma_dev_setup          = pnv_pci_dma_dev_setup,
        .setup_msi_irqs         = pnv_setup_msi_irqs,
        .teardown_msi_irqs      = pnv_teardown_msi_irqs,
        .enable_device_hook     = pnv_pci_enable_device_hook,
@@ -3620,7 +3736,8 @@ static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
 };
 
 static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
-       .enable_device_hook     = pnv_pci_enable_device_hook,
+       .enable_device_hook     = pnv_ocapi_enable_device_hook,
+       .release_device         = pnv_pci_release_device,
        .window_alignment       = pnv_pci_window_alignment,
        .reset_secondary_bus    = pnv_pci_reset_secondary_bus,
        .shutdown               = pnv_pci_ioda_shutdown,
@@ -3855,14 +3972,13 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
                hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
                break;
        default:
-               phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
                hose->controller_ops = pnv_pci_ioda_controller_ops;
        }
 
        ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
 
 #ifdef CONFIG_PCI_IOV
-       ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
+       ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
        ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
        ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
        ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
index c0bea75..5bf8182 100644 (file)
@@ -38,7 +38,7 @@ static DEFINE_MUTEX(tunnel_mutex);
 
 int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
 {
-       struct device_node *parent = np;
+       struct device_node *node = np;
        u32 bdfn;
        u64 phbid;
        int ret;
@@ -48,25 +48,29 @@ int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
                return -ENXIO;
 
        bdfn = ((bdfn & 0x00ffff00) >> 8);
-       while ((parent = of_get_parent(parent))) {
-               if (!PCI_DN(parent)) {
-                       of_node_put(parent);
+       for (node = np; node; node = of_get_parent(node)) {
+               if (!PCI_DN(node)) {
+                       of_node_put(node);
                        break;
                }
 
-               if (!of_device_is_compatible(parent, "ibm,ioda2-phb") &&
-                   !of_device_is_compatible(parent, "ibm,ioda3-phb")) {
-                       of_node_put(parent);
+               if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
+                   !of_device_is_compatible(node, "ibm,ioda3-phb") &&
+                   !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
+                       of_node_put(node);
                        continue;
                }
 
-               ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
+               ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
                if (ret) {
-                       of_node_put(parent);
+                       of_node_put(node);
                        return -ENXIO;
                }
 
-               *id = PCI_SLOT_ID(phbid, bdfn);
+               if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
+                       *id = PCI_PHB_SLOT_ID(phbid);
+               else
+                       *id = PCI_SLOT_ID(phbid, bdfn);
                return 0;
        }
 
@@ -810,53 +814,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
        return tbl;
 }
 
-void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
-{
-       struct pci_controller *hose = pci_bus_to_host(pdev->bus);
-       struct pnv_phb *phb = hose->private_data;
-#ifdef CONFIG_PCI_IOV
-       struct pnv_ioda_pe *pe;
-       struct pci_dn *pdn;
-
-       /* Fix the VF pdn PE number */
-       if (pdev->is_virtfn) {
-               pdn = pci_get_pdn(pdev);
-               WARN_ON(pdn->pe_number != IODA_INVALID_PE);
-               list_for_each_entry(pe, &phb->ioda.pe_list, list) {
-                       if (pe->rid == ((pdev->bus->number << 8) |
-                           (pdev->devfn & 0xff))) {
-                               pdn->pe_number = pe->pe_number;
-                               pe->pdev = pdev;
-                               break;
-                       }
-               }
-       }
-#endif /* CONFIG_PCI_IOV */
-
-       if (phb && phb->dma_dev_setup)
-               phb->dma_dev_setup(phb, pdev);
-}
-
-void pnv_pci_dma_bus_setup(struct pci_bus *bus)
-{
-       struct pci_controller *hose = bus->sysdata;
-       struct pnv_phb *phb = hose->private_data;
-       struct pnv_ioda_pe *pe;
-
-       list_for_each_entry(pe, &phb->ioda.pe_list, list) {
-               if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
-                       continue;
-
-               if (!pe->pbus)
-                       continue;
-
-               if (bus->number == ((pe->rid >> 8) & 0xFF)) {
-                       pe->pbus = bus;
-                       break;
-               }
-       }
-}
-
 struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
index f914f0b..d3bbdea 100644 (file)
@@ -108,7 +108,6 @@ struct pnv_phb {
        int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
                         unsigned int hwirq, unsigned int virq,
                         unsigned int is_64, struct msi_msg *msg);
-       void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
        int (*init_m64)(struct pnv_phb *phb);
        int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
        void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
@@ -189,8 +188,6 @@ extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr);
 extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
 extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
 
-extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
-extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
 extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
index 8349860..11fdae8 100644 (file)
@@ -233,6 +233,10 @@ static void  __noreturn pnv_restart(char *cmd)
                        rc = opal_cec_reboot();
                else if (strcmp(cmd, "full") == 0)
                        rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL);
+               else if (strcmp(cmd, "mpipl") == 0)
+                       rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, NULL);
+               else if (strcmp(cmd, "error") == 0)
+                       rc = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, NULL);
                else
                        rc = OPAL_UNSUPPORTED;
 
index 595e9f8..24c1836 100644 (file)
@@ -21,7 +21,6 @@ config PPC_PSERIES
        select PPC_DOORBELL
        select HOTPLUG_CPU
        select ARCH_RANDOM
-       select PPC_DOORBELL
        select FORCE_SMP
        select SWIOTLB
        default y
index d4a8f17..3e49cc2 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/firmware.h>
 #include <asm/prom.h>
 #include <asm/udbg.h>
+#include <asm/svm.h>
 
 #include "pseries.h"
 
@@ -55,7 +56,8 @@ hypertas_fw_features_table[] = {
        {FW_FEATURE_LLAN,               "hcall-lLAN"},
        {FW_FEATURE_BULK_REMOVE,        "hcall-bulk"},
        {FW_FEATURE_XDABR,              "hcall-xdabr"},
-       {FW_FEATURE_MULTITCE,           "hcall-multi-tce"},
+       {FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE,
+                                       "hcall-multi-tce"},
        {FW_FEATURE_SPLPAR,             "hcall-splpar"},
        {FW_FEATURE_VPHN,               "hcall-vphn"},
        {FW_FEATURE_SET_MODE,           "hcall-set-mode"},
@@ -100,6 +102,12 @@ static void __init fw_hypertas_feature_init(const char *hypertas,
                }
        }
 
+       if (is_secure_guest() &&
+           (powerpc_firmware_features & FW_FEATURE_PUT_TCE_IND)) {
+               powerpc_firmware_features &= ~FW_FEATURE_PUT_TCE_IND;
+               pr_debug("SVM: disabling PUT_TCE_IND firmware feature\n");
+       }
+
        pr_debug(" <- fw_hypertas_feature_init()\n");
 }
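
The single "hcall-multi-tce" string now sets two separate feature bits so each hcall can be disabled independently; a secure guest keeps H_STUFF_TCE but drops FW_FEATURE_PUT_TCE_IND, since the page of TCEs handed to H_PUT_TCE_INDIRECT would otherwise have to be shared with the hypervisor. Callers test the two bits separately, roughly:

        if (firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
                /* may batch mappings via H_PUT_TCE_INDIRECT */
        }
        if (firmware_has_feature(FW_FEATURE_STUFF_TCE)) {
                /* may clear ranges via H_STUFF_TCE */
        }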
 
index c126b94..a4d40a3 100644 (file)
@@ -360,8 +360,10 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
 
        for (i = 0; i < scns_per_block; i++) {
                pfn = PFN_DOWN(phys_addr);
-               if (!pfn_present(pfn))
+               if (!pfn_present(pfn)) {
+                       phys_addr += MIN_MEMORY_BLOCK_SIZE;
                        continue;
+               }
 
                rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
                phys_addr += MIN_MEMORY_BLOCK_SIZE;
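
Before the fix, an absent pfn hit "continue" without advancing phys_addr, so the remaining iterations re-tested the same address instead of the following memory blocks. An equivalent restructuring hoists the increment into the loop header so it cannot be skipped (a sketch, not the committed form):

        for (i = 0; i < scns_per_block; i++, phys_addr += MIN_MEMORY_BLOCK_SIZE) {
                pfn = PFN_DOWN(phys_addr);
                if (!pfn_present(pfn))
                        continue;

                rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
        }
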
index 6ba081d..2e0a8ea 100644 (file)
@@ -36,7 +36,6 @@
 #include <asm/udbg.h>
 #include <asm/mmzone.h>
 #include <asm/plpar_wrappers.h>
-#include <asm/svm.h>
 
 #include "pseries.h"
 
@@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
        return be64_to_cpu(*tcep);
 }
 
-static void tce_free_pSeriesLP(struct iommu_table*, long, long);
+static void tce_free_pSeriesLP(unsigned long liobn, long, long);
 static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
 
-static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
+static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
                                long npages, unsigned long uaddr,
                                enum dma_data_direction direction,
                                unsigned long attrs)
@@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
        int ret = 0;
        long tcenum_start = tcenum, npages_start = npages;
 
-       rpn = __pa(uaddr) >> TCE_SHIFT;
+       rpn = __pa(uaddr) >> tceshift;
        proto_tce = TCE_PCI_READ;
        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;
 
        while (npages--) {
-               tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
-               rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
+               tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
+               rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
 
                if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
                        ret = (int)rc;
-                       tce_free_pSeriesLP(tbl, tcenum_start,
+                       tce_free_pSeriesLP(liobn, tcenum_start,
                                           (npages_start - (npages + 1)));
                        break;
                }
 
                if (rc && printk_ratelimit()) {
                        printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
-                       printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+                       printk("\tindex   = 0x%llx\n", (u64)liobn);
                        printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
                        printk("\ttce val = 0x%llx\n", tce );
                        dump_stack();
@@ -193,8 +192,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
        int ret = 0;
        unsigned long flags;
 
-       if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
-               return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+       if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
+               return tce_build_pSeriesLP(tbl->it_index, tcenum,
+                                          tbl->it_page_shift, npages, uaddr,
                                           direction, attrs);
        }
 
@@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                /* If allocation fails, fall back to the loop implementation */
                if (!tcep) {
                        local_irq_restore(flags);
-                       return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
-                                           direction, attrs);
+                       return tce_build_pSeriesLP(tbl->it_index, tcenum,
+                                       tbl->it_page_shift,
+                                       npages, uaddr, direction, attrs);
                }
                __this_cpu_write(tce_page, tcep);
        }
@@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
        return ret;
 }
 
-static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
+static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
 {
        u64 rc;
 
        while (npages--) {
-               rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
+               rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
 
                if (rc && printk_ratelimit()) {
                        printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
-                       printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+                       printk("\tindex   = 0x%llx\n", (u64)liobn);
                        printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
                        dump_stack();
                }
@@ -285,8 +286,8 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
 {
        u64 rc;
 
-       if (!firmware_has_feature(FW_FEATURE_MULTITCE))
-               return tce_free_pSeriesLP(tbl, tcenum, npages);
+       if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
+               return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
 
        rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
 
@@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
        u64 rc = 0;
        long l, limit;
 
+       if (!firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
+               unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
+               unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
+                               be64_to_cpu(maprange->dma_base);
+               unsigned long tcenum = dmastart >> tceshift;
+               unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
+               void *uaddr = __va(start_pfn << PAGE_SHIFT);
+
+               return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
+                               tcenum, tceshift, npages, (unsigned long) uaddr,
+                               DMA_BIDIRECTIONAL, 0);
+       }
+
        local_irq_disable();    /* to protect tcep and the page behind it */
        tcep = __this_cpu_read(tce_page);
 
@@ -1320,24 +1334,18 @@ void iommu_init_early_pSeries(void)
        of_reconfig_notifier_register(&iommu_reconfig_nb);
        register_memory_notifier(&iommu_mem_nb);
 
-       /*
-        * Secure guest memory is inacessible to devices so regular DMA isn't
-        * possible.
-        *
-        * In that case keep devices' dma_map_ops as NULL so that the generic
-        * DMA code path will use SWIOTLB to bounce buffers for DMA.
-        */
-       if (!is_secure_guest())
-               set_pci_dma_ops(&dma_iommu_ops);
+       set_pci_dma_ops(&dma_iommu_ops);
 }
 
 static int __init disable_multitce(char *str)
 {
        if (strcmp(str, "off") == 0 &&
            firmware_has_feature(FW_FEATURE_LPAR) &&
-           firmware_has_feature(FW_FEATURE_MULTITCE)) {
+           (firmware_has_feature(FW_FEATURE_PUT_TCE_IND) ||
+            firmware_has_feature(FW_FEATURE_STUFF_TCE))) {
                printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
-               powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
+               powerpc_firmware_features &=
+                       ~(FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE);
        }
        return 1;
 }
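
Passing the liobn and page shift explicitly lets tce_build_pSeriesLP() be reused by tce_setrange_multi_pSeriesLP() as a fallback when H_PUT_TCE_INDIRECT is unavailable, without needing an iommu_table. The per-page encoding follows the hunk above; roughly:

        u64 proto_tce = TCE_PCI_READ;
        u64 rpn, tce;

        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;

        rpn = __pa(uaddr) >> tceshift;                  /* real page number */
        tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
        plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
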
index 60cb29a..3c3da25 100644 (file)
@@ -582,12 +582,12 @@ static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
        return single_open(file, vcpudispatch_stats_display, NULL);
 }
 
-static const struct file_operations vcpudispatch_stats_proc_ops = {
-       .open           = vcpudispatch_stats_open,
-       .read           = seq_read,
-       .write          = vcpudispatch_stats_write,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops vcpudispatch_stats_proc_ops = {
+       .proc_open      = vcpudispatch_stats_open,
+       .proc_read      = seq_read,
+       .proc_write     = vcpudispatch_stats_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 static ssize_t vcpudispatch_stats_freq_write(struct file *file,
@@ -626,12 +626,12 @@ static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
        return single_open(file, vcpudispatch_stats_freq_display, NULL);
 }
 
-static const struct file_operations vcpudispatch_stats_freq_proc_ops = {
-       .open           = vcpudispatch_stats_freq_open,
-       .read           = seq_read,
-       .write          = vcpudispatch_stats_freq_write,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
+       .proc_open      = vcpudispatch_stats_freq_open,
+       .proc_read      = seq_read,
+       .proc_write     = vcpudispatch_stats_freq_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 static int __init vcpudispatch_stats_procfs_init(void)
index e33e8bc..b8d28ab 100644 (file)
@@ -435,10 +435,10 @@ static void maxmem_data(struct seq_file *m)
 {
        unsigned long maxmem = 0;
 
-       maxmem += drmem_info->n_lmbs * drmem_info->lmb_size;
+       maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size;
        maxmem += hugetlb_total_pages() * PAGE_SIZE;
 
-       seq_printf(m, "MaxMem=%ld\n", maxmem);
+       seq_printf(m, "MaxMem=%lu\n", maxmem);
 }
 
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
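
n_lmbs and lmb_size are 32-bit fields, so their product is evaluated in 32-bit arithmetic and can wrap before being added to the 64-bit maxmem; widening one operand first forces a 64-bit multiply. A self-contained illustration (the values are made up):

        u32 n_lmbs = 262144, lmb_size = 256 << 20;              /* 64 TiB of 256 MiB LMBs */
        unsigned long wrong = n_lmbs * lmb_size;                /* 32-bit multiply: wraps to 0 */
        unsigned long right = (unsigned long)n_lmbs * lmb_size; /* 0x400000000000 */
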
@@ -698,12 +698,12 @@ static int lparcfg_open(struct inode *inode, struct file *file)
        return single_open(file, lparcfg_data, NULL);
 }
 
-static const struct file_operations lparcfg_fops = {
-       .read           = seq_read,
-       .write          = lparcfg_write,
-       .open           = lparcfg_open,
-       .release        = single_release,
-       .llseek         = seq_lseek,
+static const struct proc_ops lparcfg_proc_ops = {
+       .proc_read      = seq_read,
+       .proc_write     = lparcfg_write,
+       .proc_open      = lparcfg_open,
+       .proc_release   = single_release,
+       .proc_lseek     = seq_lseek,
 };
 
 static int __init lparcfg_init(void)
@@ -714,7 +714,7 @@ static int __init lparcfg_init(void)
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
                mode |= 0200;
 
-       if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
+       if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_proc_ops)) {
                printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
                return -EIO;
        }
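
These conversions are part of the v5.6 switch from struct file_operations to the smaller struct proc_ops for /proc entries; the callbacks map one-to-one (open/read/write/llseek/release become proc_open/proc_read/proc_write/proc_lseek/proc_release) and there is no .owner field. A minimal converted entry, sketched with hypothetical names:

        static int example_show(struct seq_file *m, void *v)
        {
                seq_puts(m, "example\n");
                return 0;
        }

        static int example_open(struct inode *inode, struct file *file)
        {
                return single_open(file, example_show, NULL);
        }

        static const struct proc_ops example_proc_ops = {
                .proc_open      = example_open,
                .proc_read      = seq_read,
                .proc_lseek     = seq_lseek,
                .proc_release   = single_release,
        };

        /* registered with: proc_create("powerpc/example", 0400, NULL, &example_proc_ops); */
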
index c2ef320..0b4467e 100644 (file)
@@ -69,7 +69,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
                return rc;
 
        p->bound_addr = saved;
-       dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
+       dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n",
+               p->drc_index, (unsigned long)saved);
        return rc;
 }
 
@@ -133,7 +134,7 @@ static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
                goto err_out;
 
        p->bound_addr = start_addr;
-       dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
+       dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n", p->drc_index, start_addr);
        return rc;
 
 err_out:
@@ -322,6 +323,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
        p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
        if (!p->bus) {
                dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
+               kfree(p->bus_desc.provider_name);
                return -ENXIO;
        }
 
@@ -356,7 +358,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
        ndr_desc.mapping = &mapping;
        ndr_desc.num_mappings = 1;
        ndr_desc.nd_set = &p->nd_set;
-       set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
 
        if (p->is_volatile)
                p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
@@ -477,6 +478,7 @@ static int papr_scm_remove(struct platform_device *pdev)
 
        nvdimm_bus_unregister(p->bus);
        drc_pmem_unbind(p);
+       kfree(p->bus_desc.provider_name);
        kfree(p);
 
        return 0;
index 7228309..911534b 100644 (file)
@@ -192,7 +192,7 @@ int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
 int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
 {
        /* Allocate PCI data */
-       add_dev_pci_data(pdev);
+       add_sriov_vf_pdns(pdev);
        return pseries_pci_sriov_enable(pdev, num_vfs);
 }
 
@@ -204,7 +204,7 @@ int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
        /* Releasing pe_num_map */
        kfree(pdn->pe_num_map);
        /* Release PCI data */
-       remove_dev_pci_data(pdev);
+       remove_sriov_vf_pdns(pdev);
        pci_vf_drivers_autoprobe(pdev, true);
        return 0;
 }
index 8a9c4fb..7f7369f 100644 (file)
@@ -391,9 +391,9 @@ out:
        return rv ? rv : count;
 }
 
-static const struct file_operations ofdt_fops = {
-       .write = ofdt_write,
-       .llseek = noop_llseek,
+static const struct proc_ops ofdt_proc_ops = {
+       .proc_write     = ofdt_write,
+       .proc_lseek     = noop_llseek,
 };
 
 /* create /proc/powerpc/ofdt write-only by root */
@@ -401,7 +401,7 @@ static int proc_ppc64_create_ofdt(void)
 {
        struct proc_dir_entry *ent;
 
-       ent = proc_create("powerpc/ofdt", 0200, NULL, &ofdt_fops);
+       ent = proc_create("powerpc/ofdt", 0200, NULL, &ofdt_proc_ops);
        if (ent)
                proc_set_size(ent, 0);
 
index a000128..2879c4f 100644 (file)
@@ -152,13 +152,12 @@ static int scanlog_release(struct inode * inode, struct file * file)
        return 0;
 }
 
-static const struct file_operations scanlog_fops = {
-       .owner          = THIS_MODULE,
-       .read           = scanlog_read,
-       .write          = scanlog_write,
-       .open           = scanlog_open,
-       .release        = scanlog_release,
-       .llseek         = noop_llseek,
+static const struct proc_ops scanlog_proc_ops = {
+       .proc_read      = scanlog_read,
+       .proc_write     = scanlog_write,
+       .proc_open      = scanlog_open,
+       .proc_release   = scanlog_release,
+       .proc_lseek     = noop_llseek,
 };
 
 static int __init scanlog_init(void)
@@ -176,7 +175,7 @@ static int __init scanlog_init(void)
                goto err;
 
        ent = proc_create("powerpc/rtas/scan-log-dump", 0400, NULL,
-                         &scanlog_fops);
+                         &scanlog_proc_ops);
        if (!ent)
                goto err;
        return 0;
index 79e2287..f682b7b 100644 (file)
@@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
        if (tbl == NULL)
                return NULL;
 
+       kref_init(&tbl->it_kref);
+
        of_parse_dma_window(dev->dev.of_node, dma_window,
                            &tbl->it_index, &offset, &size);
 
index 617a443..4a8874b 100644 (file)
@@ -1065,13 +1065,11 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
        addr += mfspr(SPRN_MCAR);
 
        if (is_in_pci_mem_space(addr)) {
-               if (user_mode(regs)) {
-                       pagefault_disable();
-                       ret = get_user(inst, (__u32 __user *)regs->nip);
-                       pagefault_enable();
-               } else {
+               if (user_mode(regs))
+                       ret = probe_user_read(&inst, (void __user *)regs->nip,
+                                             sizeof(inst));
+               else
                        ret = probe_kernel_address((void *)regs->nip, inst);
-               }
 
                if (!ret && mcheck_handle_load(regs, inst)) {
                        regs->nip += 4;
index 934a773..a3a72b7 100644 (file)
@@ -964,7 +964,7 @@ static struct irq_chip mpic_irq_chip = {
 };
 
 #ifdef CONFIG_SMP
-static struct irq_chip mpic_ipi_chip = {
+static const struct irq_chip mpic_ipi_chip = {
        .irq_mask       = mpic_mask_ipi,
        .irq_unmask     = mpic_unmask_ipi,
        .irq_eoi        = mpic_end_ipi,
@@ -978,7 +978,7 @@ static struct irq_chip mpic_tm_chip = {
 };
 
 #ifdef CONFIG_MPIC_U3_HT_IRQS
-static struct irq_chip mpic_irq_ht_chip = {
+static const struct irq_chip mpic_irq_ht_chip = {
        .irq_startup    = mpic_startup_ht_irq,
        .irq_shutdown   = mpic_shutdown_ht_irq,
        .irq_mask       = mpic_mask_irq,
index 7b9fe0a..014e00e 100755 (executable)
 # based on relocs_check.pl
 # Copyright © 2009 IBM Corporation
 
-if [ $# -lt 2 ]; then
-       echo "$0 [path to objdump] [path to vmlinux]" 1>&2
+if [ $# -lt 3 ]; then
+       echo "$0 [path to objdump] [path to nm] [path to vmlinux]" 1>&2
        exit 1
 fi
 
-# Have Kbuild supply the path to objdump so we handle cross compilation.
+# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
 objdump="$1"
-vmlinux="$2"
+nm="$2"
+vmlinux="$3"
+
+# Remove from the bad relocations those that match an undefined weak symbol
+# which will result in an absolute relocation to 0.
+# Weak unresolved symbols appear in nm output in this form:
+# "                  w _binary__btf_vmlinux_bin_end"
+undef_weak_symbols=$($nm "$vmlinux" | awk '$1 ~ /w/ { print $2 }')
 
 bad_relocs=$(
 $objdump -R "$vmlinux" |
@@ -26,8 +33,6 @@ $objdump -R "$vmlinux" |
        # These relocations are okay
        # On PPC64:
        #       R_PPC64_RELATIVE, R_PPC64_NONE
-       #       R_PPC64_ADDR64 mach_<name>
-       #       R_PPC64_ADDR64 __crc_<name>
        # On PPC:
        #       R_PPC_RELATIVE, R_PPC_ADDR16_HI,
        #       R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
@@ -39,8 +44,7 @@ R_PPC_ADDR16_HI
 R_PPC_ADDR16_HA
 R_PPC_RELATIVE
 R_PPC_NONE' |
-       grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' |
-       grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_'
+       ([ "$undef_weak_symbols" ] && grep -F -w -v "$undef_weak_symbols" || cat)
 )
 
 if [ -z "$bad_relocs" ]; then
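
The symbols the new filter is after come from constructs like the following: an undefined weak reference is resolved to address 0 by the linker, which then shows up as a harmless absolute relocation in the objdump output. Illustrative C, not taken from the tree:

    /* Declared weak and defined nowhere in vmlinux. */
    extern void optional_hook(void) __attribute__((weak));

    void maybe_call_hook(void)
    {
            /*
             * The linker resolves the undefined weak reference to 0,
             * so the NULL check below is exactly why the symbol is weak.
             */
            if (optional_hook)
                    optional_hook();
    }
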
index c4d246e..c4c982d 100644 (file)
@@ -13,13 +13,13 @@ extern int print_insn_spu(unsigned long insn, unsigned long memaddr);
 #else
 static inline int print_insn_powerpc(unsigned long insn, unsigned long memaddr)
 {
-       printf("%.8x", insn);
+       printf("%.8lx", insn);
        return 0;
 }
 
 static inline int print_insn_spu(unsigned long insn, unsigned long memaddr)
 {
-       printf("%.8x", insn);
+       printf("%.8lx", insn);
        return 0;
 }
 #endif
index a705604..e8c84d2 100644 (file)
@@ -1192,16 +1192,19 @@ static int do_step(struct pt_regs *regs)
 
 static void bootcmds(void)
 {
+       char tmp[64];
        int cmd;
 
        cmd = inchar();
-       if (cmd == 'r')
-               ppc_md.restart(NULL);
-       else if (cmd == 'h')
+       if (cmd == 'r') {
+               getstring(tmp, 64);
+               ppc_md.restart(tmp);
+       } else if (cmd == 'h') {
                ppc_md.halt();
-       else if (cmd == 'p')
+       } else if (cmd == 'p') {
                if (pm_power_off)
                        pm_power_off();
+       }
 }
 
 static int cpu_cmd(void)
@@ -1949,15 +1952,14 @@ static void dump_300_sprs(void)
 
        printf("pidr   = %.16lx  tidr  = %.16lx\n",
                mfspr(SPRN_PID), mfspr(SPRN_TIDR));
-       printf("asdr   = %.16lx  psscr = %.16lx\n",
-               mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR)
-                                       : mfspr(SPRN_PSSCR_PR));
+       printf("psscr  = %.16lx\n",
+               hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
 
        if (!hv)
                return;
 
-       printf("ptcr   = %.16lx\n",
-               mfspr(SPRN_PTCR));
+       printf("ptcr   = %.16lx  asdr  = %.16lx\n",
+               mfspr(SPRN_PTCR), mfspr(SPRN_ASDR));
 #endif
 }
 
index 1efaedd..ec0ca8c 100644 (file)
@@ -7,7 +7,6 @@ generic-y += div64.h
 generic-y += extable.h
 generic-y += flat.h
 generic-y += dma.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
index 36e638d..b15f70a 100644 (file)
@@ -43,6 +43,13 @@ static inline int pud_bad(pud_t pud)
        return !pud_present(pud);
 }
 
+#define pud_leaf       pud_leaf
+static inline int pud_leaf(pud_t pud)
+{
+       return pud_present(pud) &&
+              (pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
        *pudp = pud;
index f66b873..e430415 100644 (file)
@@ -130,6 +130,13 @@ static inline int pmd_bad(pmd_t pmd)
        return !pmd_present(pmd);
 }
 
+#define pmd_leaf       pmd_leaf
+static inline int pmd_leaf(pmd_t pmd)
+{
+       return pmd_present(pmd) &&
+              (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        *pmdp = pmd;
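
pmd_leaf()/pud_leaf() give generic page-table walkers a portable way to ask whether an entry is a huge mapping rather than a pointer to the next table level. A rough sketch of the intended call site (hypothetical walker, details elided):

    #include <asm/pgtable.h>

    static void walk_one_pud(pud_t *pudp, unsigned long addr)
    {
            pud_t pud = READ_ONCE(*pudp);

            if (pud_none(pud))
                    return;

            if (pud_leaf(pud)) {
                    /* Huge mapping at PUD level: no PMD/PTE tables below. */
                    note_huge_mapping(addr, pud_val(pud));  /* hypothetical */
                    return;
            }

            if (pud_bad(pud))
                    return;

            /* Otherwise descend into the PMD table below this entry (elided). */
    }
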
index 287714d..8abe775 100644 (file)
@@ -156,6 +156,7 @@ config S390
        select HAVE_KERNEL_UNCOMPRESSED
        select HAVE_KERNEL_XZ
        select HAVE_KPROBES
+       select HAVE_KPROBES_ON_FTRACE
        select HAVE_KRETPROBES
        select HAVE_KVM
        select HAVE_LIVEPATCH
@@ -163,13 +164,13 @@ config S390
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_MEMBLOCK_PHYS_MAP
-       select HAVE_MMU_GATHER_NO_GATHER
+       select MMU_GATHER_NO_GATHER
        select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_NOP_MCOUNT
        select HAVE_OPROFILE
        select HAVE_PCI
        select HAVE_PERF_EVENTS
-       select HAVE_RCU_TABLE_FREE
+       select MMU_GATHER_RCU_TABLE_FREE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE
        select HAVE_RSEQ
index e2a8578..f3caeb1 100644 (file)
@@ -5,7 +5,7 @@
  * s390 implementation of the AES Cipher Algorithm with protected keys.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2017,2019
+ *   Copyright IBM Corp. 2017,2020
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *             Harald Freudenberger <freude@de.ibm.com>
  */
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
  * is called. As paes can handle different kinds of key blobs
  * and padding is also possible, the limits need to be generous.
  */
-#define PAES_MIN_KEYSIZE 64
-#define PAES_MAX_KEYSIZE 256
+#define PAES_MIN_KEYSIZE 16
+#define PAES_MAX_KEYSIZE 320
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
@@ -53,19 +54,46 @@ struct key_blob {
        unsigned int keylen;
 };
 
-static inline int _copy_key_to_kb(struct key_blob *kb,
-                                 const u8 *key,
-                                 unsigned int keylen)
-{
-       if (keylen <= sizeof(kb->keybuf))
+static inline int _key_to_kb(struct key_blob *kb,
+                            const u8 *key,
+                            unsigned int keylen)
+{
+       struct clearkey_header {
+               u8  type;
+               u8  res0[3];
+               u8  version;
+               u8  res1[3];
+               u32 keytype;
+               u32 len;
+       } __packed * h;
+
+       switch (keylen) {
+       case 16:
+       case 24:
+       case 32:
+               /* clear key value, prepare pkey clear key token in keybuf */
+               memset(kb->keybuf, 0, sizeof(kb->keybuf));
+               h = (struct clearkey_header *) kb->keybuf;
+               h->version = 0x02; /* TOKVER_CLEAR_KEY */
+               h->keytype = (keylen - 8) >> 3;
+               h->len = keylen;
+               memcpy(kb->keybuf + sizeof(*h), key, keylen);
+               kb->keylen = sizeof(*h) + keylen;
                kb->key = kb->keybuf;
-       else {
-               kb->key = kmalloc(keylen, GFP_KERNEL);
-               if (!kb->key)
-                       return -ENOMEM;
+               break;
+       default:
+               /* other key material, let pkey handle this */
+               if (keylen <= sizeof(kb->keybuf))
+                       kb->key = kb->keybuf;
+               else {
+                       kb->key = kmalloc(keylen, GFP_KERNEL);
+                       if (!kb->key)
+                               return -ENOMEM;
+               }
+               memcpy(kb->key, key, keylen);
+               kb->keylen = keylen;
+               break;
        }
-       memcpy(kb->key, key, keylen);
-       kb->keylen = keylen;
 
        return 0;
 }
@@ -82,16 +110,18 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
 struct s390_paes_ctx {
        struct key_blob kb;
        struct pkey_protkey pk;
+       spinlock_t pk_lock;
        unsigned long fc;
 };
 
 struct s390_pxts_ctx {
        struct key_blob kb[2];
        struct pkey_protkey pk[2];
+       spinlock_t pk_lock;
        unsigned long fc;
 };
 
-static inline int __paes_convert_key(struct key_blob *kb,
+static inline int __paes_keyblob2pkey(struct key_blob *kb,
                                     struct pkey_protkey *pk)
 {
        int i, ret;
@@ -106,22 +136,18 @@ static inline int __paes_convert_key(struct key_blob *kb,
        return ret;
 }
 
-static int __paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
 {
-       unsigned long fc;
+       struct pkey_protkey pkey;
 
-       if (__paes_convert_key(&ctx->kb, &ctx->pk))
+       if (__paes_keyblob2pkey(&ctx->kb, &pkey))
                return -EINVAL;
 
-       /* Pick the correct function code based on the protected key type */
-       fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
-               (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
-               (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
-
-       /* Check if the function code is available */
-       ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+       spin_lock_bh(&ctx->pk_lock);
+       memcpy(&ctx->pk, &pkey, sizeof(pkey));
+       spin_unlock_bh(&ctx->pk_lock);
 
-       return ctx->fc ? 0 : -EINVAL;
+       return 0;
 }
 
 static int ecb_paes_init(struct crypto_skcipher *tfm)
@@ -129,6 +155,7 @@ static int ecb_paes_init(struct crypto_skcipher *tfm)
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        ctx->kb.key = NULL;
+       spin_lock_init(&ctx->pk_lock);
 
        return 0;
 }
@@ -140,6 +167,24 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm)
        _free_kb_keybuf(&ctx->kb);
 }
 
+static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
+{
+       unsigned long fc;
+
+       if (__paes_convert_key(ctx))
+               return -EINVAL;
+
+       /* Pick the correct function code based on the protected key type */
+       fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
+               (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
+               (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
+
+       /* Check if the function code is available */
+       ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+
+       return ctx->fc ? 0 : -EINVAL;
+}
+
 static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
 {
@@ -147,11 +192,11 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        _free_kb_keybuf(&ctx->kb);
-       rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+       rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;
 
-       return __paes_set_key(ctx);
+       return __ecb_paes_set_key(ctx);
 }
 
 static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -161,18 +206,31 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret;
+       struct {
+               u8 key[MAXPROTKEYSIZE];
+       } param;
 
        ret = skcipher_walk_virt(&walk, req, false);
+       if (ret)
+               return ret;
+
+       spin_lock_bh(&ctx->pk_lock);
+       memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+       spin_unlock_bh(&ctx->pk_lock);
+
        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
-               k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
+               k = cpacf_km(ctx->fc | modifier, &param,
                             walk.dst.virt.addr, walk.src.virt.addr, n);
                if (k)
                        ret = skcipher_walk_done(&walk, nbytes - k);
                if (k < n) {
-                       if (__paes_set_key(ctx) != 0)
+                       if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
+                       spin_lock_bh(&ctx->pk_lock);
+                       memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+                       spin_unlock_bh(&ctx->pk_lock);
                }
        }
        return ret;
@@ -210,6 +268,7 @@ static int cbc_paes_init(struct crypto_skcipher *tfm)
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        ctx->kb.key = NULL;
+       spin_lock_init(&ctx->pk_lock);
 
        return 0;
 }
@@ -221,11 +280,11 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm)
        _free_kb_keybuf(&ctx->kb);
 }
 
-static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
 {
        unsigned long fc;
 
-       if (__paes_convert_key(&ctx->kb, &ctx->pk))
+       if (__paes_convert_key(ctx))
                return -EINVAL;
 
        /* Pick the correct function code based on the protected key type */
@@ -246,7 +305,7 @@ static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        _free_kb_keybuf(&ctx->kb);
-       rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+       rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;
 
@@ -268,8 +327,12 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;
+
        memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
+       spin_lock_bh(&ctx->pk_lock);
        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+       spin_unlock_bh(&ctx->pk_lock);
+
        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -280,9 +343,11 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
                        ret = skcipher_walk_done(&walk, nbytes - k);
                }
                if (k < n) {
-                       if (__cbc_paes_set_key(ctx) != 0)
+                       if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
+                       spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+                       spin_unlock_bh(&ctx->pk_lock);
                }
        }
        return ret;
@@ -322,6 +387,7 @@ static int xts_paes_init(struct crypto_skcipher *tfm)
 
        ctx->kb[0].key = NULL;
        ctx->kb[1].key = NULL;
+       spin_lock_init(&ctx->pk_lock);
 
        return 0;
 }
@@ -334,12 +400,27 @@ static void xts_paes_exit(struct crypto_skcipher *tfm)
        _free_kb_keybuf(&ctx->kb[1]);
 }
 
-static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
+{
+       struct pkey_protkey pkey0, pkey1;
+
+       if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
+           __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+               return -EINVAL;
+
+       spin_lock_bh(&ctx->pk_lock);
+       memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
+       memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
+       spin_unlock_bh(&ctx->pk_lock);
+
+       return 0;
+}
+
+static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
 {
        unsigned long fc;
 
-       if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
-           __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
+       if (__xts_paes_convert_key(ctx))
                return -EINVAL;
 
        if (ctx->pk[0].type != ctx->pk[1].type)
@@ -371,10 +452,10 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 
        _free_kb_keybuf(&ctx->kb[0]);
        _free_kb_keybuf(&ctx->kb[1]);
-       rc = _copy_key_to_kb(&ctx->kb[0], in_key, key_len);
+       rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
        if (rc)
                return rc;
-       rc = _copy_key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
+       rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
        if (rc)
                return rc;
 
@@ -416,15 +497,17 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;
+
        keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
        offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
-retry:
+
        memset(&pcc_param, 0, sizeof(pcc_param));
        memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
+       spin_lock_bh(&ctx->pk_lock);
        memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
-       cpacf_pcc(ctx->fc, pcc_param.key + offset);
-
        memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
+       spin_unlock_bh(&ctx->pk_lock);
+       cpacf_pcc(ctx->fc, pcc_param.key + offset);
        memcpy(xts_param.init, pcc_param.xts, 16);
 
        while ((nbytes = walk.nbytes) != 0) {
@@ -435,11 +518,15 @@ retry:
                if (k)
                        ret = skcipher_walk_done(&walk, nbytes - k);
                if (k < n) {
-                       if (__xts_paes_set_key(ctx) != 0)
+                       if (__xts_paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
-                       goto retry;
+                       spin_lock_bh(&ctx->pk_lock);
+                       memcpy(xts_param.key + offset,
+                              ctx->pk[0].protkey, keylen);
+                       spin_unlock_bh(&ctx->pk_lock);
                }
        }
+
        return ret;
 }
 
@@ -476,6 +563,7 @@ static int ctr_paes_init(struct crypto_skcipher *tfm)
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        ctx->kb.key = NULL;
+       spin_lock_init(&ctx->pk_lock);
 
        return 0;
 }
@@ -487,11 +575,11 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm)
        _free_kb_keybuf(&ctx->kb);
 }
 
-static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
 {
        unsigned long fc;
 
-       if (__paes_convert_key(&ctx->kb, &ctx->pk))
+       if (__paes_convert_key(ctx))
                return -EINVAL;
 
        /* Pick the correct function code based on the protected key type */
@@ -513,7 +601,7 @@ static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        _free_kb_keybuf(&ctx->kb);
-       rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+       rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;
 
@@ -543,49 +631,65 @@ static int ctr_paes_crypt(struct skcipher_request *req)
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret, locked;
-
-       locked = spin_trylock(&ctrblk_lock);
+       struct {
+               u8 key[MAXPROTKEYSIZE];
+       } param;
 
        ret = skcipher_walk_virt(&walk, req, false);
+       if (ret)
+               return ret;
+
+       spin_lock_bh(&ctx->pk_lock);
+       memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+       spin_unlock_bh(&ctx->pk_lock);
+
+       locked = mutex_trylock(&ctrblk_lock);
+
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                n = AES_BLOCK_SIZE;
                if (nbytes >= 2*AES_BLOCK_SIZE && locked)
                        n = __ctrblk_init(ctrblk, walk.iv, nbytes);
                ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
-               k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
+               k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
                                walk.src.virt.addr, n, ctrptr);
                if (k) {
                        if (ctrptr == ctrblk)
                                memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
                        crypto_inc(walk.iv, AES_BLOCK_SIZE);
-                       ret = skcipher_walk_done(&walk, nbytes - n);
+                       ret = skcipher_walk_done(&walk, nbytes - k);
                }
                if (k < n) {
-                       if (__ctr_paes_set_key(ctx) != 0) {
+                       if (__paes_convert_key(ctx)) {
                                if (locked)
-                                       spin_unlock(&ctrblk_lock);
+                                       mutex_unlock(&ctrblk_lock);
                                return skcipher_walk_done(&walk, -EIO);
                        }
+                       spin_lock_bh(&ctx->pk_lock);
+                       memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+                       spin_unlock_bh(&ctx->pk_lock);
                }
        }
        if (locked)
-               spin_unlock(&ctrblk_lock);
+               mutex_unlock(&ctrblk_lock);
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                while (1) {
-                       if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
+                       if (cpacf_kmctr(ctx->fc, &param, buf,
                                        walk.src.virt.addr, AES_BLOCK_SIZE,
                                        walk.iv) == AES_BLOCK_SIZE)
                                break;
-                       if (__ctr_paes_set_key(ctx) != 0)
+                       if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
+                       spin_lock_bh(&ctx->pk_lock);
+                       memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+                       spin_unlock_bh(&ctx->pk_lock);
                }
                memcpy(walk.dst.virt.addr, buf, nbytes);
                crypto_inc(walk.iv, AES_BLOCK_SIZE);
-               ret = skcipher_walk_done(&walk, 0);
+               ret = skcipher_walk_done(&walk, nbytes);
        }
 
        return ret;
@@ -618,12 +722,12 @@ static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
 
 static void paes_s390_fini(void)
 {
-       if (ctrblk)
-               free_page((unsigned long) ctrblk);
        __crypto_unregister_skcipher(&ctr_paes_alg);
        __crypto_unregister_skcipher(&xts_paes_alg);
        __crypto_unregister_skcipher(&cbc_paes_alg);
        __crypto_unregister_skcipher(&ecb_paes_alg);
+       if (ctrblk)
+               free_page((unsigned long) ctrblk);
 }
 
 static int __init paes_s390_init(void)
@@ -661,14 +765,14 @@ static int __init paes_s390_init(void)
        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
-               ret = crypto_register_skcipher(&ctr_paes_alg);
-               if (ret)
-                       goto out_err;
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
+               ret = crypto_register_skcipher(&ctr_paes_alg);
+               if (ret)
+                       goto out_err;
        }
 
        return 0;
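
Every mode in the paes rework above ends up with the same shape: the protected key is copied out of the context under the new per-tfm pk_lock into an on-stack parameter block, the CPACF loop runs on that copy, and a short return from the instruction (k < n) triggers a re-conversion of the key blob followed by a refresh of the local copy. Condensed from the ECB path above, with declarations and error handling trimmed:

    struct {
            u8 key[MAXPROTKEYSIZE];
    } param;

    spin_lock_bh(&ctx->pk_lock);
    memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
    spin_unlock_bh(&ctx->pk_lock);

    while ((nbytes = walk.nbytes) != 0) {
            n = nbytes & ~(AES_BLOCK_SIZE - 1);     /* whole blocks only */
            k = cpacf_km(ctx->fc | modifier, &param,
                         walk.dst.virt.addr, walk.src.virt.addr, n);
            if (k)
                    ret = skcipher_walk_done(&walk, nbytes - k);
            if (k < n) {
                    /* Protected key no longer usable: rebuild and reload. */
                    if (__paes_convert_key(ctx))
                            return skcipher_walk_done(&walk, -EIO);
                    spin_lock_bh(&ctx->pk_lock);
                    memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                    spin_unlock_bh(&ctx->pk_lock);
            }
    }
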
index 2531f67..1832ae6 100644 (file)
@@ -7,7 +7,6 @@ generated-y += unistd_nr.h
 generic-y += asm-offsets.h
 generic-y += cacheflush.h
 generic-y += device.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += div64.h
 generic-y += emergency-restart.h
index b106aa2..09cdb63 100644 (file)
@@ -54,7 +54,6 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
        /* copy of original instruction */
        kprobe_opcode_t *insn;
-       unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
index a4d3809..85e944f 100644 (file)
@@ -33,6 +33,8 @@
 #define ARCH_HAS_PREPARE_HUGEPAGE
 #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
 
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__
 
index 7b03037..137a392 100644 (file)
@@ -673,6 +673,7 @@ static inline int pud_none(pud_t pud)
        return pud_val(pud) == _REGION3_ENTRY_EMPTY;
 }
 
+#define pud_leaf       pud_large
 static inline int pud_large(pud_t pud)
 {
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
@@ -690,6 +691,7 @@ static inline unsigned long pud_pfn(pud_t pud)
        return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
 }
 
+#define pmd_leaf       pmd_large
 static inline int pmd_large(pmd_t pmd)
 {
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
index e22f072..d27d7d3 100644 (file)
 #define MAXPROTKEYSIZE 64      /* a protected key blob may be up to 64 bytes */
 #define MAXCLRKEYSIZE  32         /* a clear key value may be up to 32 bytes */
 #define MAXAESCIPHERKEYSIZE 136  /* our aes cipher keys have always 136 bytes */
+#define MINEP11AESKEYBLOBSIZE 256  /* min EP11 AES key blob size  */
+#define MAXEP11AESKEYBLOBSIZE 320  /* max EP11 AES key blob size */
 
-/* Minimum and maximum size of a key blob */
+/* Minimum size of a key blob */
 #define MINKEYBLOBSIZE SECKEYBLOBSIZE
-#define MAXKEYBLOBSIZE MAXAESCIPHERKEYSIZE
 
 /* defines for the type field within the pkey_protkey struct */
 #define PKEY_KEYTYPE_AES_128                 1
@@ -39,6 +40,7 @@
 enum pkey_key_type {
        PKEY_TYPE_CCA_DATA   = (__u32) 1,
        PKEY_TYPE_CCA_CIPHER = (__u32) 2,
+       PKEY_TYPE_EP11       = (__u32) 3,
 };
 
 /* the newer ioctls use a pkey_key_size enum for key size information */
@@ -200,7 +202,7 @@ struct pkey_kblob2pkey {
 
 /*
  * Generate secure key, version 2.
- * Generate either a CCA AES secure key or a CCA AES cipher key.
+ * Generate CCA AES secure key, CCA AES cipher key or EP11 AES secure key.
  * There needs to be a list of apqns given with at least one entry in there.
  * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
  * is not supported. The implementation walks through the list of apqns and
@@ -210,10 +212,13 @@ struct pkey_kblob2pkey {
  * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
  * generate a list of apqns based on the key type to generate.
  * The keygenflags argument is passed to the low level generation functions
- * individual for the key type and has a key type specific meaning. Currently
- * only CCA AES cipher keys react to this parameter: Use one or more of the
- * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
- * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * individual for the key type and has a key type specific meaning. When
+ * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_*
+ * flags to widen the export possibilities. By default a cipher key is
+ * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * The keygenflag argument for generating an EP11 AES key should either be 0
+ * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and
+ * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags.
  */
 struct pkey_genseck2 {
        struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets*/
@@ -229,8 +234,8 @@ struct pkey_genseck2 {
 
 /*
  * Generate secure key from clear key value, version 2.
- * Construct a CCA AES secure key or CCA AES cipher key from a given clear key
- * value.
+ * Construct a CCA AES secure key, CCA AES cipher key or EP11 AES secure
+ * key from a given clear key value.
  * There needs to be a list of apqns given with at least one entry in there.
  * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
  * is not supported. The implementation walks through the list of apqns and
@@ -240,10 +245,13 @@ struct pkey_genseck2 {
  * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
  * generate a list of apqns based on the key type to generate.
  * The keygenflags argument is passed to the low level generation functions
- * individual for the key type and has a key type specific meaning. Currently
- * only CCA AES cipher keys react to this parameter: Use one or more of the
- * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
- * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * individual for the key type and has a key type specific meaning. When
+ * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_*
+ * flags to widen the export possibilities. By default a cipher key is
+ * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * The keygenflag argument for generating an EP11 AES key should either be 0
+ * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and
+ * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags.
  */
 struct pkey_clr2seck2 {
        struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */
@@ -266,14 +274,19 @@ struct pkey_clr2seck2 {
  * with one apqn able to handle this key.
  * The function also checks for the master key verification patterns
  * of the key matching to the current or alternate mkvp of the apqn.
- * Currently CCA AES secure keys and CCA AES cipher keys are supported.
- * The flags field is updated with some additional info about the apqn mkvp
+ * For CCA AES secure keys and CCA AES cipher keys this means to check
+ * the key's mkvp against the current or old mkvp of the apqns. The flags
+ * field is updated with some additional info about the apqn mkvp
  * match: If the current mkvp matches to the key's mkvp then the
  * PKEY_FLAGS_MATCH_CUR_MKVP bit is set, if the alternate mkvp matches to
  * the key's mkvp the PKEY_FLAGS_MATCH_ALT_MKVP is set. For CCA keys the
  * alternate mkvp is the old master key verification pattern.
  * CCA AES secure keys are also checked to have the CPACF export allowed
  * bit enabled (XPRTCPAC) in the kmf1 field.
+ * EP11 keys are also supported and the wkvp of the key is checked against
+ * the current wkvp of the apqns. There is no alternate for this type of
+ * key, so on a match the flag PKEY_FLAGS_MATCH_CUR_MKVP is always set.
+ * EP11 keys are also checked to have XCP_BLOB_PROTKEY_EXTRACTABLE set.
  * The ioctl returns 0 as long as the given or found apqn's current or
  * alternate mkvp matches the key's mkvp. If the given
  * apqn does not match or there is no such apqn found, -1 with errno
@@ -313,16 +326,20 @@ struct pkey_kblob2pkey2 {
 /*
  * Build a list of APQNs based on a key blob given.
  * Is able to find out which type of secure key is given (CCA AES secure
- * key or CCA AES cipher key) and tries to find all matching crypto cards
- * based on the MKVP and maybe other criterias (like CCA AES cipher keys
- * need a CEX5C or higher). The list of APQNs is further filtered by the key's
- * mkvp which needs to match to either the current mkvp or the alternate mkvp
- * (which is the old mkvp on CCA adapters) of the apqns. The flags argument may
- * be used to limit the matching apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is
- * given, only the current mkvp of each apqn is compared. Likewise with the
- * PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it is assumed to
- * return apqns where either the current or the alternate mkvp
+ * key, CCA AES cipher key or EP11 AES key) and tries to find all matching
+ * crypto cards based on the MKVP and maybe other criteria (like CCA AES
+ * cipher keys need a CEX5C or higher, EP11 keys with BLOB_PKEY_EXTRACTABLE
+ * need a CEX7 and EP11 api version 4). The list of APQNs is further filtered
+ * by the key's mkvp which needs to match to either the current mkvp (CCA and
+ * EP11) or the alternate mkvp (old mkvp, CCA adapters only) of the apqns. The
+ * flags argument may be used to limit the matching apqns. If the
+ * PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of each apqn is
+ * compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it
+ * is assumed to return apqns where either the current or the alternate mkvp
  * matches. At least one of the matching flags needs to be given.
+ * The flags argument for EP11 keys has no further effect and is currently
+ * ignored (but needs to be given as PKEY_FLAGS_MATCH_CUR_MKVP) as there is only
+ * the wkvp from the key to match against the apqn's wkvp.
  * The list of matching apqns is stored into the space given by the apqns
  * argument and the number of stored entries goes into apqn_entries. If the list
  * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
@@ -356,6 +373,10 @@ struct pkey_apqns4key {
  * If both are given, it is assumed to return apqns where either the
  * current or the alternate mkvp matches. If no match flag is given
  * (flags is 0) the mkvp values are ignored for the match process.
+ * For EP11 keys there is only the current wkvp. So if the apqns should also
+ * match to a given wkvp, then the PKEY_FLAGS_MATCH_CUR_MKVP flag should be
+ * set. The wkvp value is 32 bytes but only the leftmost 16 bytes are compared
+ * against the leftmost 16 bytes of the wkvp of the apqn.
  * The list of matching apqns is stored into the space given by the apqns
  * argument and the number of stored entries goes into apqn_entries. If the list
  * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
index f9e5e1f..5a2177e 100644 (file)
@@ -161,17 +161,17 @@ struct ica_xcRB {
  * @payload_len:       Payload length
  */
 struct ep11_cprb {
-       __u16           cprb_len;
-       unsigned char   cprb_ver_id;
-       unsigned char   pad_000[2];
-       unsigned char   flags;
-       unsigned char   func_id[2];
-       __u32           source_id;
-       __u32           target_id;
-       __u32           ret_code;
-       __u32           reserved1;
-       __u32           reserved2;
-       __u32           payload_len;
+       __u16   cprb_len;
+       __u8    cprb_ver_id;
+       __u8    pad_000[2];
+       __u8    flags;
+       __u8    func_id[2];
+       __u32   source_id;
+       __u32   target_id;
+       __u32   ret_code;
+       __u32   reserved1;
+       __u32   reserved2;
+       __u32   payload_len;
 } __attribute__((packed));
 
 /**
@@ -197,13 +197,13 @@ struct ep11_target_dev {
  */
 struct ep11_urb {
        __u16           targets_num;
-       __u64           targets;
+       __u8 __user    *targets;
        __u64           weight;
        __u64           req_no;
        __u64           req_len;
-       __u64           req;
+       __u8 __user    *req;
        __u64           resp_len;
-       __u64           resp;
+       __u8 __user    *resp;
 } __attribute__((packed));
 
 /**
@@ -237,7 +237,9 @@ struct zcrypt_device_matrix_ext {
        struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT];
 };
 
-#define AUTOSELECT 0xFFFFFFFF
+#define AUTOSELECT  0xFFFFFFFF
+#define AUTOSEL_AP  ((__u16) 0xFFFF)
+#define AUTOSEL_DOM ((__u16) 0xFFFF)
 
 #define ZCRYPT_IOCTL_MAGIC 'z'
 
index 1bb85f6..4cd9b1a 100644 (file)
@@ -72,15 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
 #endif
 }
 
-static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
-       if (insn->opc == BREAKPOINT_INSTRUCTION)
-               return 1;
-#endif
-       return 0;
-}
-
 static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
 {
 #ifdef CONFIG_KPROBES
@@ -114,16 +105,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                /* Initial code replacement */
                ftrace_generate_orig_insn(&orig);
                ftrace_generate_nop_insn(&new);
-       } else if (is_kprobe_on_ftrace(&old)) {
-               /*
-                * If we find a breakpoint instruction, a kprobe has been
-                * placed at the beginning of the function. We write the
-                * constant KPROBE_ON_FTRACE_NOP into the remaining four
-                * bytes of the original instruction so that the kprobes
-                * handler can execute a nop, if it reaches this breakpoint.
-                */
-               ftrace_generate_kprobe_call_insn(&orig);
-               ftrace_generate_kprobe_nop_insn(&new);
        } else {
                /* Replace ftrace call with a nop. */
                ftrace_generate_call_insn(&orig, rec->ip);
@@ -142,21 +123,10 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
-       if (is_kprobe_on_ftrace(&old)) {
-               /*
-                * If we find a breakpoint instruction, a kprobe has been
-                * placed at the beginning of the function. We write the
-                * constant KPROBE_ON_FTRACE_CALL into the remaining four
-                * bytes of the original instruction so that the kprobes
-                * handler can execute a brasl if it reaches this breakpoint.
-                */
-               ftrace_generate_kprobe_nop_insn(&orig);
-               ftrace_generate_kprobe_call_insn(&new);
-       } else {
-               /* Replace nop with an ftrace call. */
-               ftrace_generate_nop_insn(&orig);
-               ftrace_generate_call_insn(&new, rec->ip);
-       }
+       /* Replace nop with an ftrace call. */
+       ftrace_generate_nop_insn(&orig);
+       ftrace_generate_call_insn(&new, rec->ip);
+
        /* Verify that the to be replaced code matches what we expect. */
        if (memcmp(&orig, &old, sizeof(old)))
                return -EINVAL;
@@ -241,3 +211,45 @@ int ftrace_disable_ftrace_graph_caller(void)
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+               struct ftrace_ops *ops, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb;
+       struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+
+       if (unlikely(!p) || kprobe_disabled(p))
+               return;
+
+       if (kprobe_running()) {
+               kprobes_inc_nmissed_count(p);
+               return;
+       }
+
+       __this_cpu_write(current_kprobe, p);
+
+       kcb = get_kprobe_ctlblk();
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+       instruction_pointer_set(regs, ip);
+
+       if (!p->pre_handler || !p->pre_handler(p, regs)) {
+
+               instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
+
+               if (unlikely(p->post_handler)) {
+                       kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                       p->post_handler(p, regs, 0);
+               }
+       }
+       __this_cpu_write(current_kprobe, NULL);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+       p->ainsn.insn = NULL;
+       return 0;
+}
+#endif
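
With CONFIG_KPROBES_ON_FTRACE now selected on s390, a kprobe that lands on a function's ftrace location is dispatched through kprobe_ftrace_handler() above instead of via a breakpoint; callers of the kprobes API do not change. A minimal sketch of such a probe as a module (the target symbol is just an assumed example):

    #include <linux/kprobes.h>
    #include <linux/module.h>

    static int example_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("hit %s, ip=%lx\n", p->symbol_name,
                    instruction_pointer(regs));
            return 0;
    }

    static struct kprobe kp = {
            .symbol_name    = "do_sys_open",        /* assumed example target */
            .pre_handler    = example_pre,
    };

    static int __init kp_init(void)
    {
            /* On an ftrace location this now takes the KPROBES_ON_FTRACE path. */
            return register_kprobe(&kp);
    }

    static void __exit kp_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(kp_init);
    module_exit(kp_exit);
    MODULE_LICENSE("GPL");
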
index 6f13883..548d0ea 100644 (file)
@@ -56,21 +56,10 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = {
 
 static void copy_instruction(struct kprobe *p)
 {
-       unsigned long ip = (unsigned long) p->addr;
        s64 disp, new_disp;
        u64 addr, new_addr;
 
-       if (ftrace_location(ip) == ip) {
-               /*
-                * If kprobes patches the instruction that is morphed by
-                * ftrace make sure that kprobes always sees the branch
-                * "jg .+24" that skips the mcount block or the "brcl 0,0"
-                * in case of hotpatch.
-                */
-               ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
-               p->ainsn.is_ftrace_insn = 1;
-       } else
-               memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
+       memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
        p->opcode = p->ainsn.insn[0];
        if (!probe_is_insn_relative_long(p->ainsn.insn))
                return;
@@ -136,11 +125,6 @@ int arch_prepare_kprobe(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(arch_prepare_kprobe);
 
-int arch_check_ftrace_location(struct kprobe *p)
-{
-       return 0;
-}
-
 struct swap_insn_args {
        struct kprobe *p;
        unsigned int arm_kprobe : 1;
@@ -149,28 +133,11 @@ struct swap_insn_args {
 static int swap_instruction(void *data)
 {
        struct swap_insn_args *args = data;
-       struct ftrace_insn new_insn, *insn;
        struct kprobe *p = args->p;
-       size_t len;
-
-       new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
-       len = sizeof(new_insn.opc);
-       if (!p->ainsn.is_ftrace_insn)
-               goto skip_ftrace;
-       len = sizeof(new_insn);
-       insn = (struct ftrace_insn *) p->addr;
-       if (args->arm_kprobe) {
-               if (is_ftrace_nop(insn))
-                       new_insn.disp = KPROBE_ON_FTRACE_NOP;
-               else
-                       new_insn.disp = KPROBE_ON_FTRACE_CALL;
-       } else {
-               ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
-               if (insn->disp == KPROBE_ON_FTRACE_NOP)
-                       ftrace_generate_nop_insn(&new_insn);
-       }
-skip_ftrace:
-       s390_kernel_write(p->addr, &new_insn, len);
+       u16 opc;
+
+       opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+       s390_kernel_write(p->addr, &opc, sizeof(opc));
        return 0;
 }
 NOKPROBE_SYMBOL(swap_instruction);
@@ -464,24 +431,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
        unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);
 
-       /* Check if the kprobes location is an enabled ftrace caller */
-       if (p->ainsn.is_ftrace_insn) {
-               struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
-               struct ftrace_insn call_insn;
-
-               ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
-               /*
-                * A kprobe on an enabled ftrace call site actually single
-                * stepped an unconditional branch (ftrace nop equivalent).
-                * Now we need to fixup things and pretend that a brasl r0,...
-                * was executed instead.
-                */
-               if (insn->disp == KPROBE_ON_FTRACE_CALL) {
-                       ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
-                       regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
-               }
-       }
-
        if (fixup & FIXUP_PSW_NORMAL)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
index f942341..7458dcf 100644 (file)
@@ -42,6 +42,9 @@ ENTRY(ftrace_caller)
        .globl  ftrace_regs_caller
        .set    ftrace_regs_caller,ftrace_caller
        stg     %r14,(__SF_GPRS+8*8)(%r15)      # save traced function caller
+       lghi    %r14,0                          # save condition code
+       ipm     %r14                            # don't put any instructions
+       sllg    %r14,%r14,16                    # clobbering CC before this point
        lgr     %r1,%r15
 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
        aghi    %r0,MCOUNT_RETURN_FIXUP
@@ -54,6 +57,9 @@ ENTRY(ftrace_caller)
        # allocate pt_regs and stack frame for ftrace_trace_function
        aghi    %r15,-STACK_FRAME_SIZE
        stg     %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+       stg     %r14,(STACK_PTREGS_PSW)(%r15)
+       lg      %r14,(__SF_GPRS+8*8)(%r1)       # restore original return address
+       stosm   (STACK_PTREGS_PSW)(%r15),0
        aghi    %r1,-TRACED_FUNC_FRAME_SIZE
        stg     %r1,__SF_BACKCHAIN(%r15)
        stg     %r0,(STACK_PTREGS_PSW+8)(%r15)
index b0246c7..5674710 100644 (file)
@@ -2,7 +2,7 @@
 /*
  *  IBM System z Huge TLB Page Support for Kernel.
  *
- *    Copyright IBM Corp. 2007,2016
+ *    Copyright IBM Corp. 2007,2020
  *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
@@ -11,6 +11,9 @@
 
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/security.h>
 
 /*
  * If the bit selected by single-bit bitmask "a" is set within "x", move
@@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char *opt)
        return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+               unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags)
+{
+       struct hstate *h = hstate_file(file);
+       struct vm_unmapped_area_info info;
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = current->mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
+       return vm_unmapped_area(&info);
+}
+
+static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+               unsigned long addr0, unsigned long len,
+               unsigned long pgoff, unsigned long flags)
+{
+       struct hstate *h = hstate_file(file);
+       struct vm_unmapped_area_info info;
+       unsigned long addr;
+
+       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+       info.length = len;
+       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+       info.high_limit = current->mm->mmap_base;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
+       addr = vm_unmapped_area(&info);
+
+       /*
+        * A failed mmap() very likely causes application failure,
+        * so fall back to the bottom-up function here. This scenario
+        * can happen with large stack limits and large mmap()
+        * allocations.
+        */
+       if (addr & ~PAGE_MASK) {
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
+
+       return addr;
+}
+
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct hstate *h = hstate_file(file);
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int rc;
+
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (len > TASK_SIZE - mmap_min_addr)
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED) {
+               if (prepare_hugepage_range(file, addr, len))
+                       return -EINVAL;
+               goto check_asce_limit;
+       }
+
+       if (addr) {
+               addr = ALIGN(addr, huge_page_size(h));
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+                   (!vma || addr + len <= vm_start_gap(vma)))
+                       goto check_asce_limit;
+       }
+
+       if (mm->get_unmapped_area == arch_get_unmapped_area)
+               addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
+                               pgoff, flags);
+       else
+               addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
+                               pgoff, flags);
+       if (addr & ~PAGE_MASK)
+               return addr;
+
+check_asce_limit:
+       if (addr + len > current->mm->context.asce_limit &&
+           addr + len <= TASK_SIZE) {
+               rc = crst_table_upgrade(mm, addr + len);
+               if (rc)
+                       return (unsigned long) rc;
+       }
+       return addr;
+}
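
The only arch-specific piece in the helpers above is the alignment constraint handed to vm_unmapped_area(); everything else mirrors the generic topdown/bottomup logic. A small worked sketch of what that mask does, assuming 1 MiB huge pages (the s390 PMD huge page size):

    /*
     * huge_page_mask(h) == ~0xfffffUL for 1 MiB pages, PAGE_MASK == ~0xfffUL,
     * so align_mask == 0xff000: vm_unmapped_area() only returns addresses
     * with those bits clear, i.e. 1 MiB aligned hugepage ranges.
     */
    struct vm_unmapped_area_info info = {
            .length       = len,
            .low_limit    = current->mm->mmap_base,
            .high_limit   = TASK_SIZE,
            .align_mask   = PAGE_MASK & ~huge_page_mask(h),
            .align_offset = 0,
    };

    addr = vm_unmapped_area(&info);
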
index ec2b253..fb517b8 100644 (file)
@@ -152,13 +152,12 @@ static ssize_t alignment_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations alignment_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = alignment_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = alignment_proc_write,
+static const struct proc_ops alignment_proc_ops = {
+       .proc_open      = alignment_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = alignment_proc_write,
 };
 
 /*
@@ -176,12 +175,12 @@ static int __init alignment_init(void)
                return -ENOMEM;
 
        res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
-                              &alignment_proc_fops, &se_usermode);
+                              &alignment_proc_ops, &se_usermode);
        if (!res)
                return -ENOMEM;
 
         res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
-                              &alignment_proc_fops, &se_kernmode_warn);
+                              &alignment_proc_ops, &se_kernmode_warn);
         if (!res)
                 return -ENOMEM;
 
index e8c3ea0..c1dd6dd 100644 (file)
@@ -64,8 +64,7 @@ config SPARC64
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_KRETPROBES
        select HAVE_KPROBES
-       select HAVE_RCU_TABLE_FREE if SMP
-       select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+       select MMU_GATHER_RCU_TABLE_FREE if SMP
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_DYNAMIC_FTRACE
index 34ff3b4..65494c3 100644 (file)
@@ -683,6 +683,7 @@ static inline unsigned long pte_special(pte_t pte)
        return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+#define pmd_leaf       pmd_large
 static inline unsigned long pmd_large(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
@@ -867,6 +868,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
 /* only used by the stubbed out hugetlb gup code, should never be called */
 #define p4d_page(p4d)                  NULL
 
+#define pud_leaf       pud_large
 static inline unsigned long pud_large(pud_t pud)
 {
        pte_t pte = __pte(pud_val(pud));
index a2f3fa6..6820d35 100644 (file)
@@ -28,6 +28,15 @@ void flush_tlb_pending(void);
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 #define tlb_flush(tlb) flush_tlb_pending()
 
+/*
+ * SPARC64's hardware TLB fill does not use the Linux page-tables
+ * and therefore we don't need a TLBI when freeing page-table pages.
+ */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+#define tlb_needs_table_invalidate()   (false)
+#endif
+
 #include <asm-generic/tlb.h>
 
 #endif /* _SPARC64_TLB_H */
index a6292f8..bd48575 100644 (file)
@@ -104,13 +104,12 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
        return count;
 }
 
-static const struct file_operations led_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = led_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = led_proc_write,
+static const struct proc_ops led_proc_ops = {
+       .proc_open      = led_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = led_proc_write,
 };
 
 static struct proc_dir_entry *led;
@@ -121,7 +120,7 @@ static int __init led_init(void)
 {
        timer_setup(&led_blink_timer, led_blink, 0);
 
-       led = proc_create("led", 0, NULL, &led_proc_fops);
+       led = proc_create("led", 0, NULL, &led_proc_ops);
        if (!led)
                return -ENOMEM;
 
index 0117489..b80a1d6 100644 (file)
@@ -752,10 +752,9 @@ static ssize_t mconsole_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations mconsole_proc_fops = {
-       .owner          = THIS_MODULE,
-       .write          = mconsole_proc_write,
-       .llseek         = noop_llseek,
+static const struct proc_ops mconsole_proc_ops = {
+       .proc_write     = mconsole_proc_write,
+       .proc_lseek     = noop_llseek,
 };
 
 static int create_proc_mconsole(void)
@@ -765,7 +764,7 @@ static int create_proc_mconsole(void)
        if (notify_socket == NULL)
                return 0;
 
-       ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_fops);
+       ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_ops);
        if (ent == NULL) {
                printk(KERN_INFO "create_proc_mconsole : proc_create failed\n");
                return 0;
index 369fd84..43edc2a 100644 (file)
@@ -55,20 +55,19 @@ static ssize_t exitcode_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations exitcode_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = exitcode_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = exitcode_proc_write,
+static const struct proc_ops exitcode_proc_ops = {
+       .proc_open      = exitcode_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = exitcode_proc_write,
 };
 
 static int make_proc_exitcode(void)
 {
        struct proc_dir_entry *ent;
 
-       ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
+       ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_ops);
        if (ent == NULL) {
                printk(KERN_WARNING "make_proc_exitcode : Failed to register "
                       "/proc/exitcode\n");
index 17045e7..56a0941 100644 (file)
@@ -348,13 +348,12 @@ static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations sysemu_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = sysemu_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = sysemu_proc_write,
+static const struct proc_ops sysemu_proc_ops = {
+       .proc_open      = sysemu_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = sysemu_proc_write,
 };
 
 int __init make_proc_sysemu(void)
@@ -363,7 +362,7 @@ int __init make_proc_sysemu(void)
        if (!sysemu_supported)
                return 0;
 
-       ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
+       ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);
 
        if (ent == NULL)
        {
index 44d2796..beea770 100644 (file)
@@ -120,6 +120,7 @@ config X86
        select GENERIC_IRQ_RESERVATION_MODE
        select GENERIC_IRQ_SHOW
        select GENERIC_PENDING_IRQ              if SMP
+       select GENERIC_PTDUMP
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
@@ -202,7 +203,7 @@ config X86
        select HAVE_PCI
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
-       select HAVE_RCU_TABLE_FREE              if PARAVIRT
+       select MMU_GATHER_RCU_TABLE_FREE                if PARAVIRT
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE         if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
        select HAVE_FUNCTION_ARG_ACCESS_API
index c4eab8e..2e74690 100644 (file)
@@ -62,26 +62,10 @@ config EARLY_PRINTK_USB_XDBC
 config MCSAFE_TEST
        def_bool n
 
-config X86_PTDUMP_CORE
-       def_bool n
-
-config X86_PTDUMP
-       tristate "Export kernel pagetable layout to userspace via debugfs"
-       depends on DEBUG_KERNEL
-       select DEBUG_FS
-       select X86_PTDUMP_CORE
-       ---help---
-         Say Y here if you want to show the kernel pagetable layout in a
-         debugfs file. This information is only useful for kernel developers
-         who are working in architecture specific areas of the kernel.
-         It is probably not a good idea to enable this feature in a production
-         kernel.
-         If in doubt, say "N"
-
 config EFI_PGT_DUMP
        bool "Dump the EFI pagetable"
        depends on EFI
-       select X86_PTDUMP_CORE
+       select PTDUMP_CORE
        ---help---
          Enable this if you want to dump the EFI page table before
          enabling virtual mode. This can be used to debug miscellaneous
@@ -90,7 +74,7 @@ config EFI_PGT_DUMP
 
 config DEBUG_WX
        bool "Warn on W+X mappings at boot"
-       select X86_PTDUMP_CORE
+       select PTDUMP_CORE
        ---help---
          Generate a warning if any W+X mappings are found at boot.
 
index 8b52bc5..ea34464 100644 (file)
@@ -7,7 +7,6 @@ generated-y += unistd_32_ia32.h
 generated-y += unistd_64_x32.h
 generated-y += xen-hypercalls.h
 
-generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
 generic-y += export.h
 generic-y += mcs_spinlock.h
index ad97dc1..7e11866 100644 (file)
@@ -29,8 +29,9 @@
 extern pgd_t early_top_pgt[PTRS_PER_PGD];
 int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
 
-void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
-void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
+void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
+                                  bool user);
 void ptdump_walk_pgd_level_checkwx(void);
 void ptdump_walk_user_pgd_level_checkwx(void);
 
@@ -239,6 +240,7 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
        return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+#define p4d_leaf       p4d_large
 static inline int p4d_large(p4d_t p4d)
 {
        /* No 512 GiB pages yet */
@@ -247,6 +249,7 @@ static inline int p4d_large(p4d_t p4d)
 
 #define pte_page(pte)  pfn_to_page(pte_pfn(pte))
 
+#define pmd_leaf       pmd_large
 static inline int pmd_large(pmd_t pte)
 {
        return pmd_flags(pte) & _PAGE_PSE;
@@ -874,6 +877,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
 }
 
+#define pud_leaf       pud_large
 static inline int pud_large(pud_t pud)
 {
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -885,6 +889,7 @@ static inline int pud_bad(pud_t pud)
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
 }
 #else
+#define pud_leaf       pud_large
 static inline int pud_large(pud_t pud)
 {
        return 0;
@@ -1233,6 +1238,7 @@ static inline bool pgdp_maps_userspace(void *__ptr)
        return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
 }
 
+#define pgd_leaf       pgd_large
 static inline int pgd_large(pgd_t pgd) { return 0; }
 
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
index f23e7aa..820082b 100644 (file)
@@ -29,8 +29,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
  * shootdown, enablement code for several hypervisors overrides
  * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
  * a hypercall. To keep software pagetable walkers safe in this case we
- * switch to RCU based table free (HAVE_RCU_TABLE_FREE). See the comment
- * below 'ifdef CONFIG_HAVE_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
+ * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
  * for more details.
  */
 static inline void __tlb_remove_table(void *table)
index da532f6..a5c506f 100644 (file)
@@ -396,15 +396,16 @@ static int mtrr_open(struct inode *inode, struct file *file)
        return single_open(file, mtrr_seq_show, NULL);
 }
 
-static const struct file_operations mtrr_fops = {
-       .owner                  = THIS_MODULE,
-       .open                   = mtrr_open,
-       .read                   = seq_read,
-       .llseek                 = seq_lseek,
-       .write                  = mtrr_write,
-       .unlocked_ioctl         = mtrr_ioctl,
-       .compat_ioctl           = mtrr_ioctl,
-       .release                = mtrr_close,
+static const struct proc_ops mtrr_proc_ops = {
+       .proc_open              = mtrr_open,
+       .proc_read              = seq_read,
+       .proc_lseek             = seq_lseek,
+       .proc_write             = mtrr_write,
+       .proc_ioctl             = mtrr_ioctl,
+#ifdef CONFIG_COMPAT
+       .proc_compat_ioctl      = mtrr_ioctl,
+#endif
+       .proc_release           = mtrr_close,
 };
 
 static int __init mtrr_if_init(void)
@@ -417,7 +418,7 @@ static int __init mtrr_if_init(void)
            (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
                return -ENODEV;
 
-       proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
+       proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_proc_ops);
        return 0;
 }
 arch_initcall(mtrr_if_init);
index 98aecb1..98f7c6f 100644 (file)
@@ -28,8 +28,8 @@ CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
 obj-$(CONFIG_X86_32)           += pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
-obj-$(CONFIG_X86_PTDUMP_CORE)  += dump_pagetables.o
-obj-$(CONFIG_X86_PTDUMP)       += debug_pagetables.o
+obj-$(CONFIG_PTDUMP_CORE)      += dump_pagetables.o
+obj-$(CONFIG_PTDUMP_DEBUGFS)   += debug_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)          += highmem_32.o
 
index 39001a4..4a3b62f 100644 (file)
@@ -7,7 +7,7 @@
 
 static int ptdump_show(struct seq_file *m, void *v)
 {
-       ptdump_walk_pgd_level_debugfs(m, NULL, false);
+       ptdump_walk_pgd_level_debugfs(m, &init_mm, false);
        return 0;
 }
 
@@ -15,11 +15,8 @@ DEFINE_SHOW_ATTRIBUTE(ptdump);
 
 static int ptdump_curknl_show(struct seq_file *m, void *v)
 {
-       if (current->mm->pgd) {
-               down_read(&current->mm->mmap_sem);
-               ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
-               up_read(&current->mm->mmap_sem);
-       }
+       if (current->mm->pgd)
+               ptdump_walk_pgd_level_debugfs(m, current->mm, false);
        return 0;
 }
 
@@ -28,11 +25,8 @@ DEFINE_SHOW_ATTRIBUTE(ptdump_curknl);
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 static int ptdump_curusr_show(struct seq_file *m, void *v)
 {
-       if (current->mm->pgd) {
-               down_read(&current->mm->mmap_sem);
-               ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
-               up_read(&current->mm->mmap_sem);
-       }
+       if (current->mm->pgd)
+               ptdump_walk_pgd_level_debugfs(m, current->mm, true);
        return 0;
 }
 
@@ -43,7 +37,7 @@ DEFINE_SHOW_ATTRIBUTE(ptdump_curusr);
 static int ptdump_efi_show(struct seq_file *m, void *v)
 {
        if (efi_mm.pgd)
-               ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false);
+               ptdump_walk_pgd_level_debugfs(m, &efi_mm, false);
        return 0;
 }
 
index ab67822..64229da 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
 #include <linux/pci.h>
+#include <linux/ptdump.h>
 
 #include <asm/e820/types.h>
 #include <asm/pgtable.h>
  * when a "break" in the continuity is found.
  */
 struct pg_state {
+       struct ptdump_state ptdump;
        int level;
-       pgprot_t current_prot;
+       pgprotval_t current_prot;
        pgprotval_t effective_prot;
+       pgprotval_t prot_levels[5];
        unsigned long start_address;
-       unsigned long current_address;
        const struct addr_marker *marker;
        unsigned long lines;
        bool to_dmesg;
        bool check_wx;
        unsigned long wx_pages;
+       struct seq_file *seq;
 };
 
 struct addr_marker {
@@ -174,11 +177,10 @@ static struct addr_marker address_markers[] = {
 /*
  * Print a readable form of a pgprot_t to the seq_file
  */
-static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
+static void printk_prot(struct seq_file *m, pgprotval_t pr, int level, bool dmsg)
 {
-       pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
-               { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
+               { "pgd", "p4d", "pud", "pmd", "pte" };
 
        if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
@@ -202,12 +204,12 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
                        pt_dump_cont_printf(m, dmsg, "    ");
 
                /* Bit 7 has a different meaning on level 3 vs 4 */
-               if (level <= 4 && pr & _PAGE_PSE)
+               if (level <= 3 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
-               if ((level == 5 && pr & _PAGE_PAT) ||
-                   ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
+               if ((level == 4 && pr & _PAGE_PAT) ||
+                   ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "PAT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
@@ -223,24 +225,11 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
 }
 
-/*
- * On 64 bits, sign-extend the 48 bit address to 64 bit
- */
-static unsigned long normalize_addr(unsigned long u)
-{
-       int shift;
-       if (!IS_ENABLED(CONFIG_X86_64))
-               return u;
-
-       shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
-       return (signed long)(u << shift) >> shift;
-}
-
-static void note_wx(struct pg_state *st)
+static void note_wx(struct pg_state *st, unsigned long addr)
 {
        unsigned long npages;
 
-       npages = (st->current_address - st->start_address) / PAGE_SIZE;
+       npages = (addr - st->start_address) / PAGE_SIZE;
 
 #ifdef CONFIG_PCI_BIOS
        /*
@@ -248,7 +237,7 @@ static void note_wx(struct pg_state *st)
         * Inform about it, but avoid the warning.
         */
        if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
-           st->current_address <= PAGE_OFFSET + BIOS_END) {
+           addr <= PAGE_OFFSET + BIOS_END) {
                pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
                return;
        }
@@ -260,27 +249,47 @@ static void note_wx(struct pg_state *st)
                  (void *)st->start_address);
 }
 
+static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
+{
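+       /* USER/RW apply only if set at both levels; NX at either applies. */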
+       return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
+              ((prot1 | prot2) & _PAGE_NX);
+}
+
 /*
  * This function gets called on a break in a continuous series
  * of PTE entries; the next one is different so we need to
  * print what we collected so far.
  */
-static void note_page(struct seq_file *m, struct pg_state *st,
-                     pgprot_t new_prot, pgprotval_t new_eff, int level)
+static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+                     unsigned long val)
 {
-       pgprotval_t prot, cur, eff;
+       struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
+       pgprotval_t new_prot, new_eff;
+       pgprotval_t cur, eff;
        static const char units[] = "BKMGTPE";
+       struct seq_file *m = st->seq;
+
+       new_prot = val & PTE_FLAGS_MASK;
+
+       if (level > 0) {
+               new_eff = effective_prot(st->prot_levels[level - 1],
+                                        new_prot);
+       } else {
+               new_eff = new_prot;
+       }
+
+       if (level >= 0)
+               st->prot_levels[level] = new_eff;
 
        /*
         * If we have a "break" in the series, we need to flush the state that
         * we have now. "break" is either changing perms, levels or
         * address space marker.
         */
-       prot = pgprot_val(new_prot);
-       cur = pgprot_val(st->current_prot);
+       cur = st->current_prot;
        eff = st->effective_prot;
 
-       if (!st->level) {
+       if (st->level == -1) {
                /* First entry */
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
@@ -289,14 +298,14 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
-       } else if (prot != cur || new_eff != eff || level != st->level ||
-                  st->current_address >= st->marker[1].start_address) {
+       } else if (new_prot != cur || new_eff != eff || level != st->level ||
+                  addr >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;
 
                if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
-                       note_wx(st);
+                       note_wx(st, addr);
 
                /*
                 * Now print the actual finished series
@@ -306,9 +315,9 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
-                                          width, st->current_address);
+                                          width, addr);
 
-                       delta = st->current_address - st->start_address;
+                       delta = addr - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
@@ -325,7 +334,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                 * such as the start of vmalloc space etc.
                 * This helps in the interpretation.
                 */
-               if (st->current_address >= st->marker[1].start_address) {
+               if (addr >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
@@ -341,222 +350,45 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                                           st->marker->name);
                }
 
-               st->start_address = st->current_address;
+               st->start_address = addr;
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
        }
 }
 
-static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
-{
-       return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
-              ((prot1 | prot2) & _PAGE_NX);
-}
-
-static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
-                          pgprotval_t eff_in, unsigned long P)
-{
-       int i;
-       pte_t *pte;
-       pgprotval_t prot, eff;
-
-       for (i = 0; i < PTRS_PER_PTE; i++) {
-               st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
-               pte = pte_offset_map(&addr, st->current_address);
-               prot = pte_flags(*pte);
-               eff = effective_prot(eff_in, prot);
-               note_page(m, st, __pgprot(prot), eff, 5);
-               pte_unmap(pte);
-       }
-}
-#ifdef CONFIG_KASAN
-
-/*
- * This is an optimization for KASAN=y case. Since all kasan page tables
- * eventually point to the kasan_early_shadow_page we could call note_page()
- * right away without walking through lower level page tables. This saves
- * us dozens of seconds (minutes for 5-level config) while checking for
- * W+X mapping or reading kernel_page_tables debugfs file.
- */
-static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
-                               void *pt)
-{
-       if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
-           (pgtable_l5_enabled() &&
-                       __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
-           __pa(pt) == __pa(kasan_early_shadow_pud)) {
-               pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
-               note_page(m, st, __pgprot(prot), 0, 5);
-               return true;
-       }
-       return false;
-}
-#else
-static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
-                               void *pt)
-{
-       return false;
-}
-#endif
-
-#if PTRS_PER_PMD > 1
-
-static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
-                          pgprotval_t eff_in, unsigned long P)
-{
-       int i;
-       pmd_t *start, *pmd_start;
-       pgprotval_t prot, eff;
-
-       pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
-       for (i = 0; i < PTRS_PER_PMD; i++) {
-               st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
-               if (!pmd_none(*start)) {
-                       prot = pmd_flags(*start);
-                       eff = effective_prot(eff_in, prot);
-                       if (pmd_large(*start) || !pmd_present(*start)) {
-                               note_page(m, st, __pgprot(prot), eff, 4);
-                       } else if (!kasan_page_table(m, st, pmd_start)) {
-                               walk_pte_level(m, st, *start, eff,
-                                              P + i * PMD_LEVEL_MULT);
-                       }
-               } else
-                       note_page(m, st, __pgprot(0), 0, 4);
-               start++;
-       }
-}
-
-#else
-#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
-#define pud_large(a) pmd_large(__pmd(pud_val(a)))
-#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
-#endif
-
-#if PTRS_PER_PUD > 1
-
-static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
-                          pgprotval_t eff_in, unsigned long P)
-{
-       int i;
-       pud_t *start, *pud_start;
-       pgprotval_t prot, eff;
-
-       pud_start = start = (pud_t *)p4d_page_vaddr(addr);
-
-       for (i = 0; i < PTRS_PER_PUD; i++) {
-               st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
-               if (!pud_none(*start)) {
-                       prot = pud_flags(*start);
-                       eff = effective_prot(eff_in, prot);
-                       if (pud_large(*start) || !pud_present(*start)) {
-                               note_page(m, st, __pgprot(prot), eff, 3);
-                       } else if (!kasan_page_table(m, st, pud_start)) {
-                               walk_pmd_level(m, st, *start, eff,
-                                              P + i * PUD_LEVEL_MULT);
-                       }
-               } else
-                       note_page(m, st, __pgprot(0), 0, 3);
-
-               start++;
-       }
-}
-
-#else
-#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
-#define p4d_large(a) pud_large(__pud(p4d_val(a)))
-#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
-#endif
-
-static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
-                          pgprotval_t eff_in, unsigned long P)
-{
-       int i;
-       p4d_t *start, *p4d_start;
-       pgprotval_t prot, eff;
-
-       if (PTRS_PER_P4D == 1)
-               return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);
-
-       p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);
-
-       for (i = 0; i < PTRS_PER_P4D; i++) {
-               st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
-               if (!p4d_none(*start)) {
-                       prot = p4d_flags(*start);
-                       eff = effective_prot(eff_in, prot);
-                       if (p4d_large(*start) || !p4d_present(*start)) {
-                               note_page(m, st, __pgprot(prot), eff, 2);
-                       } else if (!kasan_page_table(m, st, p4d_start)) {
-                               walk_pud_level(m, st, *start, eff,
-                                              P + i * P4D_LEVEL_MULT);
-                       }
-               } else
-                       note_page(m, st, __pgprot(0), 0, 2);
-
-               start++;
-       }
-}
-
-#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
-#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
-
-static inline bool is_hypervisor_range(int idx)
-{
-#ifdef CONFIG_X86_64
-       /*
-        * A hole in the beginning of kernel address space reserved
-        * for a hypervisor.
-        */
-       return  (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
-               (idx <  pgd_index(GUARD_HOLE_END_ADDR));
-#else
-       return false;
-#endif
-}
-
-static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
+static void ptdump_walk_pgd_level_core(struct seq_file *m,
+                                      struct mm_struct *mm, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
 {
-       pgd_t *start = INIT_PGD;
-       pgprotval_t prot, eff;
-       int i;
-       struct pg_state st = {};
-
-       if (pgd) {
-               start = pgd;
-               st.to_dmesg = dmesg;
-       }
+       const struct ptdump_range ptdump_ranges[] = {
+#ifdef CONFIG_X86_64
 
-       st.check_wx = checkwx;
-       if (checkwx)
-               st.wx_pages = 0;
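+/* Sign-extend a 48/57-bit virtual address to its canonical 64-bit form. */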
+#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
+#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \
+                          normalize_addr_shift)
 
-       for (i = 0; i < PTRS_PER_PGD; i++) {
-               st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
-               if (!pgd_none(*start) && !is_hypervisor_range(i)) {
-                       prot = pgd_flags(*start);
-#ifdef CONFIG_X86_PAE
-                       eff = _PAGE_USER | _PAGE_RW;
+       {0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
+       {normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
 #else
-                       eff = prot;
+       {0, ~0UL},
 #endif
-                       if (pgd_large(*start) || !pgd_present(*start)) {
-                               note_page(m, &st, __pgprot(prot), eff, 1);
-                       } else {
-                               walk_p4d_level(m, &st, *start, eff,
-                                              i * PGD_LEVEL_MULT);
-                       }
-               } else
-                       note_page(m, &st, __pgprot(0), 0, 1);
+       {0, 0}
+};
 
-               cond_resched();
-               start++;
-       }
+       struct pg_state st = {
+               .ptdump = {
+                       .note_page      = note_page,
+                       .range          = ptdump_ranges
+               },
+               .level = -1,
+               .to_dmesg       = dmesg,
+               .check_wx       = checkwx,
+               .seq            = m
+       };
+
+       ptdump_walk_pgd(&st.ptdump, mm, pgd);
 
-       /* Flush out the last page */
-       st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
-       note_page(m, &st, __pgprot(0), 0, 0);
        if (!checkwx)
                return;
        if (st.wx_pages)
@@ -566,18 +398,20 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
 }
 
-void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
+void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm)
 {
-       ptdump_walk_pgd_level_core(m, pgd, false, true);
+       ptdump_walk_pgd_level_core(m, mm, mm->pgd, false, true);
 }
 
-void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
+                                  bool user)
 {
+       pgd_t *pgd = mm->pgd;
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (user && boot_cpu_has(X86_FEATURE_PTI))
                pgd = kernel_to_user_pgdp(pgd);
 #endif
-       ptdump_walk_pgd_level_core(m, pgd, false, false);
+       ptdump_walk_pgd_level_core(m, mm, pgd, false, false);
 }
 EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
 
@@ -592,13 +426,13 @@ void ptdump_walk_user_pgd_level_checkwx(void)
 
        pr_info("x86/mm: Checking user space page tables\n");
        pgd = kernel_to_user_pgdp(pgd);
-       ptdump_walk_pgd_level_core(NULL, pgd, true, false);
+       ptdump_walk_pgd_level_core(NULL, &init_mm, pgd, true, false);
 #endif
 }
 
 void ptdump_walk_pgd_level_checkwx(void)
 {
-       ptdump_walk_pgd_level_core(NULL, NULL, true, false);
+       ptdump_walk_pgd_level_core(NULL, &init_mm, INIT_PGD, true, false);
 }
 
 static int __init pt_dump_init(void)
index 71dddd1..081d466 100644 (file)
@@ -49,7 +49,7 @@ void efi_sync_low_kernel_mappings(void) {}
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-       ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+       ptdump_walk_pgd_level(NULL, &init_mm);
 #endif
 }
 
index e2accfe..fa8506e 100644 (file)
@@ -471,9 +471,9 @@ void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
        if (efi_have_uv1_memmap())
-               ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+               ptdump_walk_pgd_level(NULL, &init_mm);
        else
-               ptdump_walk_pgd_level(NULL, efi_mm.pgd);
+               ptdump_walk_pgd_level(NULL, &efi_mm);
 #endif
 }
 
index 5f0a96b..1fd321f 100644 (file)
@@ -1668,12 +1668,12 @@ static int tunables_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations proc_uv_ptc_operations = {
-       .open           = ptc_proc_open,
-       .read           = seq_read,
-       .write          = ptc_proc_write,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops uv_ptc_proc_ops = {
+       .proc_open      = ptc_proc_open,
+       .proc_read      = seq_read,
+       .proc_write     = ptc_proc_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 static const struct file_operations tunables_fops = {
@@ -1691,7 +1691,7 @@ static int __init uv_ptc_init(void)
                return 0;
 
        proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
-                                 &proc_uv_ptc_operations);
+                                 &uv_ptc_proc_ops);
        if (!proc_uv_ptc) {
                pr_err("unable to create %s proc entry\n",
                       UV_PTC_BASENAME);
index 3acc31e..271917c 100644 (file)
@@ -4,7 +4,6 @@ generic-y += bug.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
index f9cd458..8331098 100644 (file)
@@ -251,10 +251,10 @@ out_free:
        return err;
 }
 
-static const struct file_operations fops = {
-       .read = proc_read_simdisk,
-       .write = proc_write_simdisk,
-       .llseek = default_llseek,
+static const struct proc_ops simdisk_proc_ops = {
+       .proc_read      = proc_read_simdisk,
+       .proc_write     = proc_write_simdisk,
+       .proc_lseek     = default_llseek,
 };
 
 static int __init simdisk_setup(struct simdisk *dev, int which,
@@ -290,7 +290,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
        set_capacity(dev->gd, 0);
        add_disk(dev->gd);
 
-       dev->procfile = proc_create_data(tmp, 0644, procdir, &fops, dev);
+       dev->procfile = proc_create_data(tmp, 0644, procdir, &simdisk_proc_ops, dev);
        return 0;
 
 out_alloc_disk:
index e1419ed..09b69a3 100644 (file)
@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
                kfree(bfqg);
 }
 
-static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+void bfqg_and_blkg_get(struct bfq_group *bfqg)
 {
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);
@@ -651,9 +651,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);
 
+       /*
+        * Get an extra reference to prevent bfqq from being freed by
+        * the next possible deactivation.
+        */
+       bfqq->ref++;
+
        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
-       else if (entity->on_st)
+       else if (entity->on_st_or_in_serv)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));
 
@@ -670,6 +676,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
        if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
                bfq_schedule_dispatch(bfqd);
+       /* release extra ref taken above */
+       bfq_put_queue(bfqq);
 }
 
 /**
@@ -1398,6 +1406,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
        return bfqq->bfqd->root_group;
 }
 
+void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
+
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 {
        struct bfq_group *bfqg;
index 4686b68..8c436ab 100644 (file)
@@ -613,6 +613,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
                bfqq->pos_root = NULL;
        }
 
+       /* oom_bfqq does not participate in queue merging */
+       if (bfqq == &bfqd->oom_bfqq)
+               return;
+
        /*
         * bfqq cannot be merged any longer (see comments in
         * bfq_setup_cooperator): no point in adding bfqq into the
@@ -1055,7 +1059,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
 
 static int bfqq_process_refs(struct bfq_queue *bfqq)
 {
-       return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
+       return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
                (bfqq->weight_counter != NULL);
 }
 
@@ -3443,6 +3447,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
                                                 struct bfq_queue *bfqq)
 {
+       /* No point in idling for bfqq if it won't get requests any longer */
+       if (unlikely(!bfqq_process_refs(bfqq)))
+               return false;
+
        return (bfqq->wr_coeff > 1 &&
                (bfqd->wr_busy_queues <
                 bfq_tot_busy_queues(bfqd) ||
@@ -4076,6 +4084,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
                bfqq_sequential_and_IO_bound,
                idling_boosts_thr;
 
+       /* No point in idling for bfqq if it won't get requests any longer */
+       if (unlikely(!bfqq_process_refs(bfqq)))
+               return false;
+
        bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
                bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
 
@@ -4169,6 +4181,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
        struct bfq_data *bfqd = bfqq->bfqd;
        bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
 
+       /* No point in idling for bfqq if it won't get requests any longer */
+       if (unlikely(!bfqq_process_refs(bfqq)))
+               return false;
+
        if (unlikely(bfqd->strict_guarantees))
                return true;
 
@@ -4809,9 +4825,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 {
        struct bfq_queue *item;
        struct hlist_node *n;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
        struct bfq_group *bfqg = bfqq_group(bfqq);
-#endif
 
        if (bfqq->bfqd)
                bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
@@ -4884,9 +4898,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
                bfqq->bfqd->last_completed_rq_bfqq = NULL;
 
        kmem_cache_free(bfq_pool, bfqq);
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
        bfqg_and_blkg_put(bfqg);
-#endif
 }
 
 static void bfq_put_cooperator(struct bfq_queue *bfqq)
@@ -5967,6 +5979,8 @@ static void bfq_finish_requeue_request(struct request *rq)
 }
 
 /*
+ * Removes the association between the current task and bfqq, assuming
+ * that bic points to the bfq iocontext of the task.
  * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
  * was the last process referring to that bfqq.
  */
@@ -6374,10 +6388,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
 
        hrtimer_cancel(&bfqd->idle_slice_timer);
 
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
        /* release oom-queue reference to root group */
        bfqg_and_blkg_put(bfqd->root_group);
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
        blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
 #else
        spin_lock_irq(&bfqd->lock);
index 8526f20..d1233af 100644 (file)
@@ -150,7 +150,7 @@ struct bfq_entity {
         * Flag, true if the entity is on a tree (either the active or
         * the idle one of its service_tree) or is in service.
         */
-       bool on_st;
+       bool on_st_or_in_serv;
 
        /* B-WF2Q+ start and finish timestamps [sectors/weight] */
        u64 start, finish;
@@ -921,6 +921,7 @@ struct bfq_group {
 
 #else
 struct bfq_group {
+       struct bfq_entity entity;
        struct bfq_sched_data sched_data;
 
        struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
@@ -984,6 +985,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
+void bfqg_and_blkg_get(struct bfq_group *bfqg);
 void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
index ffe9ce9..eb0e2a6 100644 (file)
@@ -533,7 +533,9 @@ static void bfq_get_entity(struct bfq_entity *entity)
                bfqq->ref++;
                bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
                             bfqq, bfqq->ref);
-       }
+       } else
+               bfqg_and_blkg_get(container_of(entity, struct bfq_group,
+                                              entity));
 }
 
 /**
@@ -645,10 +647,16 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
 {
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
 
-       entity->on_st = false;
+       entity->on_st_or_in_serv = false;
        st->wsum -= entity->weight;
-       if (bfqq && !is_in_service)
+       if (is_in_service)
+               return;
+
+       if (bfqq)
                bfq_put_queue(bfqq);
+       else
+               bfqg_and_blkg_put(container_of(entity, struct bfq_group,
+                                              entity));
 }
 
 /**
@@ -999,7 +1007,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
                 */
                bfq_get_entity(entity);
 
-               entity->on_st = true;
+               entity->on_st_or_in_serv = true;
        }
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -1165,7 +1173,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
        struct bfq_service_tree *st;
        bool is_in_service;
 
-       if (!entity->on_st) /* entity never activated, or already inactive */
+       if (!entity->on_st_or_in_serv) /*
+                                       * entity never activated, or
+                                       * already inactive
+                                       */
                return false;
 
        /*
@@ -1620,7 +1631,7 @@ bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
         * service tree either, then release the service reference to
         * the queue it represents (taken with bfq_get_entity).
         */
-       if (!in_serv_entity->on_st) {
+       if (!in_serv_entity->on_st_or_in_serv) {
                /*
                 * If no process is referencing in_serv_bfqq any
                 * longer, then the service reference may be the only
index 3d8e530..439367a 100644 (file)
@@ -171,7 +171,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
 
        type = alg_get_type(sa->salg_type);
-       if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
+       if (PTR_ERR(type) == -ENOENT) {
                request_module("algif-%s", sa->salg_type);
                type = alg_get_type(sa->salg_type);
        }
index 6078064..ed3d2d1 100644 (file)
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt)    "ACPI: IORT: " fmt
 
 #include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
 #include <linux/iommu.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -902,9 +903,9 @@ static inline bool iort_iommu_driver_enabled(u8 type)
 {
        switch (type) {
        case ACPI_IORT_NODE_SMMU_V3:
-               return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
+               return IS_ENABLED(CONFIG_ARM_SMMU_V3);
        case ACPI_IORT_NODE_SMMU:
-               return IS_BUILTIN(CONFIG_ARM_SMMU);
+               return IS_ENABLED(CONFIG_ARM_SMMU);
        default:
                pr_warn("IORT node type %u does not describe an SMMU\n", type);
                return false;
@@ -976,6 +977,20 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
        return iort_iommu_xlate(info->dev, parent, streamid);
 }
 
+static void iort_named_component_init(struct device *dev,
+                                     struct acpi_iort_node *node)
+{
+       struct acpi_iort_named_component *nc;
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+       if (!fwspec)
+               return;
+
+       nc = (struct acpi_iort_named_component *)node->node_data;
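+       /* Pass the named component's PASID width on to the IOMMU fwspec. */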
+       fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
+                                          nc->node_flags);
+}
+
 /**
  * iort_iommu_configure - Set-up IOMMU configuration for a device.
  *
@@ -1030,6 +1045,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
                        if (parent)
                                err = iort_iommu_xlate(dev, parent, streamid);
                } while (parent && !err);
+
+               if (!err)
+                       iort_named_component_init(dev, node);
        }
 
        /*
index 15cc7d5..111a407 100644 (file)
@@ -1202,13 +1202,12 @@ static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file)
        return single_open(file, acpi_battery_alarm_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations acpi_battery_alarm_fops = {
-       .owner          = THIS_MODULE,
-       .open           = acpi_battery_alarm_proc_open,
-       .read           = seq_read,
-       .write          = acpi_battery_write_alarm,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops acpi_battery_alarm_proc_ops = {
+       .proc_open      = acpi_battery_alarm_proc_open,
+       .proc_read      = seq_read,
+       .proc_write     = acpi_battery_write_alarm,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 static int acpi_battery_add_fs(struct acpi_device *device)
@@ -1228,7 +1227,7 @@ static int acpi_battery_add_fs(struct acpi_device *device)
                        acpi_battery_state_proc_show, acpi_driver_data(device)))
                return -ENODEV;
        if (!proc_create_data("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-                       acpi_device_dir(device), &acpi_battery_alarm_fops,
+                       acpi_device_dir(device), &acpi_battery_alarm_proc_ops,
                        acpi_driver_data(device)))
                return -ENODEV;
        return 0;
index 652f19e..0e62ef2 100644 (file)
@@ -136,18 +136,17 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
                           PDE_DATA(inode));
 }
 
-static const struct file_operations acpi_system_wakeup_device_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_system_wakeup_device_open_fs,
-       .read = seq_read,
-       .write = acpi_system_write_wakeup_device,
-       .llseek = seq_lseek,
-       .release = single_release,
+static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
+       .proc_open      = acpi_system_wakeup_device_open_fs,
+       .proc_read      = seq_read,
+       .proc_write     = acpi_system_write_wakeup_device,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 void __init acpi_sleep_proc_init(void)
 {
        /* 'wakeup device' [R/W] */
        proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
-                   acpi_root_dir, &acpi_system_wakeup_device_fops);
+                   acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
 }
index 915650b..6d34488 100644 (file)
@@ -1462,7 +1462,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
        iort_dma_setup(dev, &dma_addr, &size);
 
        iommu = iort_iommu_configure(dev);
-       if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+       if (PTR_ERR(iommu) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        arch_setup_dma_ops(dev, dma_addr, size,
index 4bfd1b1..11ea1af 100644 (file)
@@ -81,6 +81,7 @@ enum board_ids {
 
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void ahci_remove_one(struct pci_dev *dev);
+static void ahci_shutdown_one(struct pci_dev *dev);
 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline);
 static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -606,6 +607,7 @@ static struct pci_driver ahci_pci_driver = {
        .id_table               = ahci_pci_tbl,
        .probe                  = ahci_init_one,
        .remove                 = ahci_remove_one,
+       .shutdown               = ahci_shutdown_one,
        .driver = {
                .pm             = &ahci_pci_pm_ops,
        },
@@ -1877,6 +1879,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 }
 
+static void ahci_shutdown_one(struct pci_dev *pdev)
+{
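+       /* Freeze the ports and stop their DMA engines before power-off. */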
+       ata_pci_shutdown_one(pdev);
+}
+
 static void ahci_remove_one(struct pci_dev *pdev)
 {
        pm_runtime_get_noresume(&pdev->dev);
index 6f4ab5c..42c8728 100644 (file)
@@ -6767,6 +6767,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
        ata_host_detach(host);
 }
 
+void ata_pci_shutdown_one(struct pci_dev *pdev)
+{
+       struct ata_host *host = pci_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < host->n_ports; i++) {
+               struct ata_port *ap = host->ports[i];
+
+               ap->pflags |= ATA_PFLAG_FROZEN;
+
+               /* Disable port interrupts */
+               if (ap->ops->freeze)
+                       ap->ops->freeze(ap);
+
+               /* Stop the port DMA engines */
+               if (ap->ops->port_stop)
+                       ap->ops->port_stop(ap);
+       }
+}
+
 /* move to PCI subsystem */
 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
 {
@@ -7387,6 +7407,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 #ifdef CONFIG_PM
 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
index 391dff0..e9cf31f 100644 (file)
@@ -526,9 +526,10 @@ static void data_xfer(struct work_struct *work)
 
        /* request dma channels */
        /* dma_request_channel may sleep, so calling from process context */
-       acdev->dma_chan = dma_request_slave_channel(acdev->host->dev, "data");
-       if (!acdev->dma_chan) {
+       acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
+       if (IS_ERR(acdev->dma_chan)) {
                dev_err(acdev->host->dev, "Unable to get dma_chan\n");
+               acdev->dma_chan = NULL;
                goto chan_request_fail;
        }
 
@@ -539,6 +540,7 @@ static void data_xfer(struct work_struct *work)
        }
 
        dma_release_channel(acdev->dma_chan);
+       acdev->dma_chan = NULL;
 
        /* data xferred successfully */
        if (!ret) {
index 3fe0754..8eb066a 100644 (file)
@@ -309,6 +309,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
        PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000),        /* Toshiba */
        PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
        PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000),        /* Samsung */
+       PCMCIA_DEVICE_MANF_CARD(0x00f1, 0x0101),        /* SanDisk High (>8G) CFA */
        PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000),        /* Hitachi */
        PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
        PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100),        /* Viking CFA */
index 1565930..b9f474c 100644 (file)
@@ -376,7 +376,6 @@ static ssize_t valid_zones_show(struct device *dev,
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-       unsigned long valid_start_pfn, valid_end_pfn;
        struct zone *default_zone;
        int nid;
 
@@ -389,11 +388,11 @@ static ssize_t valid_zones_show(struct device *dev,
                 * The block contains more than one zone can not be offlined.
                 * This can happen e.g. for ZONE_DMA and ZONE_DMA32
                 */
-               if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
-                                         &valid_start_pfn, &valid_end_pfn))
+               default_zone = test_pages_in_a_zone(start_pfn,
+                                                   start_pfn + nr_pages);
+               if (!default_zone)
                        return sprintf(buf, "none\n");
-               start_pfn = valid_start_pfn;
-               strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
+               strcat(buf, default_zone->name);
                goto out;
        }
 
index a8730cc..220c5e1 100644 (file)
@@ -473,6 +473,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
        return kobj;
 }
 
+static inline void brd_check_and_reset_par(void)
+{
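+       /* Sanitize the max_part module parameter before creating devices. */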
+       if (unlikely(!max_part))
+               max_part = 1;
+
+       /*
+        * Make sure 'max_part' divides (1U << MINORBITS) exactly;
+        * otherwise, it is possible to get the same dev_t when adding partitions.
+        */
+       if ((1U << MINORBITS) % max_part != 0)
+               max_part = 1UL << fls(max_part);
+
+       if (max_part > DISK_MAX_PARTS) {
+               pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
+                       DISK_MAX_PARTS, DISK_MAX_PARTS);
+               max_part = DISK_MAX_PARTS;
+       }
+}
+
 static int __init brd_init(void)
 {
        struct brd_device *brd, *next;
@@ -496,8 +515,7 @@ static int __init brd_init(void)
        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
                return -EIO;
 
-       if (unlikely(!max_part))
-               max_part = 1;
+       brd_check_and_reset_par();
 
        for (i = 0; i < rd_nr; i++) {
                brd = brd_alloc(i);
index ddbf560..aae99a2 100644 (file)
@@ -622,7 +622,7 @@ struct fifo_buffer {
        int total; /* sum of all values */
        int values[0];
 };
-extern struct fifo_buffer *fifo_alloc(int fifo_size);
+extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
 
 /* flag bits per connection */
 enum {
index de2f94d..da4a3eb 100644 (file)
@@ -1575,7 +1575,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
        struct drbd_device *device;
        struct disk_conf *new_disk_conf, *old_disk_conf;
        struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
-       int err, fifo_size;
+       int err;
+       unsigned int fifo_size;
 
        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
index 2b3103c..79e2164 100644 (file)
@@ -3887,7 +3887,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
        struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
        const int apv = connection->agreed_pro_version;
        struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
-       int fifo_size = 0;
+       unsigned int fifo_size = 0;
        int err;
 
        peer_device = conn_peer_device(connection, pi->vnr);
index 5bdcc70..b7f605c 100644 (file)
@@ -482,11 +482,11 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
                fb->values[i] += value;
 }
 
-struct fifo_buffer *fifo_alloc(int fifo_size)
+struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
 {
        struct fifo_buffer *fb;
 
-       fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
+       fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
        if (!fb)
                return NULL;
 
index b4607dd..7818190 100644 (file)
@@ -1265,6 +1265,16 @@ static int nbd_start_device(struct nbd_device *nbd)
                args = kzalloc(sizeof(*args), GFP_KERNEL);
                if (!args) {
                        sock_shutdown(nbd);
+                       /*
+                        * If num_connections is m (m > 2) and the first n
+                        * (1 < n < m) kzallocs succeed but the (n + 1)th fails,
+                        * we still have n recv threads running.
+                        * So, add flush_workqueue here to prevent recv threads
+                        * dropping the last config_refs and trying to destroy
+                        * the workqueue from inside the workqueue.
+                        */
+                       if (i)
+                               flush_workqueue(nbd->recv_workq);
                        return -ENOMEM;
                }
                sk_set_memalloc(config->socks[i]->sock->sk);
index ae8d4bc..1651079 100644 (file)
@@ -263,34 +263,34 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
 }
 
 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
-#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)                                   \
-static ssize_t                                                                 \
-nullb_device_##NAME##_show(struct config_item *item, char *page)               \
-{                                                                              \
-       return nullb_device_##TYPE##_attr_show(                                 \
-                               to_nullb_device(item)->NAME, page);             \
-}                                                                              \
-static ssize_t                                                                 \
-nullb_device_##NAME##_store(struct config_item *item, const char *page,                \
-                           size_t count)                                       \
-{                                                                              \
-       int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;      \
-       struct nullb_device *dev = to_nullb_device(item);                       \
-       TYPE new_value;                                                         \
-       int ret;                                                                \
-                                                                               \
-       ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);        \
-       if (ret < 0)                                                            \
-               return ret;                                                     \
-       if (apply_fn)                                                           \
-               ret = apply_fn(dev, new_value);                                 \
-       else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags))                \
-               ret = -EBUSY;                                                   \
-       if (ret < 0)                                                            \
-               return ret;                                                     \
-       dev->NAME = new_value;                                                  \
-       return count;                                                           \
-}                                                                              \
+#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)                           \
+static ssize_t                                                         \
+nullb_device_##NAME##_show(struct config_item *item, char *page)       \
+{                                                                      \
+       return nullb_device_##TYPE##_attr_show(                         \
+                               to_nullb_device(item)->NAME, page);     \
+}                                                                      \
+static ssize_t                                                         \
+nullb_device_##NAME##_store(struct config_item *item, const char *page,        \
+                           size_t count)                               \
+{                                                                      \
+       int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
+       struct nullb_device *dev = to_nullb_device(item);               \
+       TYPE uninitialized_var(new_value);                              \
+       int ret;                                                        \
+                                                                       \
+       ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
+       if (ret < 0)                                                    \
+               return ret;                                             \
+       if (apply_fn)                                                   \
+               ret = apply_fn(dev, new_value);                         \
+       else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags))        \
+               ret = -EBUSY;                                           \
+       if (ret < 0)                                                    \
+               return ret;                                             \
+       dev->NAME = new_value;                                          \
+       return count;                                                   \
+}                                                                      \
 CONFIGFS_ATTR(nullb_device_, NAME);
 
 static int nullb_apply_submit_queues(struct nullb_device *dev,
index 2b18456..405b66e 100644 (file)
@@ -2662,7 +2662,7 @@ static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
                               u64 off, u64 len)
 {
        struct ceph_file_extent ex = { off, len };
-       union rbd_img_fill_iter dummy;
+       union rbd_img_fill_iter dummy = {};
        struct rbd_img_fill_ctx fctx = {
                .pos_type = OBJ_REQUEST_NODATA,
                .pos = &dummy,
@@ -7143,7 +7143,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        if (rc)
                goto err_out_image_lock;
 
-       add_disk(rbd_dev->disk);
+       device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
        /* see rbd_init_disk() */
        blk_put_queue(rbd_dev->disk->queue);
 
index 716b99a..c2f7126 100644 (file)
@@ -62,8 +62,8 @@
  * IO workloads.
  */
 
-static int xen_blkif_max_buffer_pages = 1024;
-module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
+static int max_buffer_pages = 1024;
+module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
 MODULE_PARM_DESC(max_buffer_pages,
 "Maximum number of free pages to keep in each block backend buffer");
 
@@ -78,8 +78,8 @@ MODULE_PARM_DESC(max_buffer_pages,
  * algorithm.
  */
 
-static int xen_blkif_max_pgrants = 1056;
-module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
+static int max_pgrants = 1056;
+module_param_named(max_persistent_grants, max_pgrants, int, 0644);
 MODULE_PARM_DESC(max_persistent_grants,
                  "Maximum number of grants to map persistently");
 
@@ -88,8 +88,8 @@ MODULE_PARM_DESC(max_persistent_grants,
  * use. The time is in seconds, 0 means indefinitely long.
  */
 
-static unsigned int xen_blkif_pgrant_timeout = 60;
-module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+static unsigned int pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
                   uint, 0644);
 MODULE_PARM_DESC(persistent_grant_unused_seconds,
                 "Time in seconds an unused persistent grant is allowed to "
@@ -137,9 +137,8 @@ module_param(log_stats, int, 0644);
 
 static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
 {
-       return xen_blkif_pgrant_timeout &&
-              (jiffies - persistent_gnt->last_used >=
-               HZ * xen_blkif_pgrant_timeout);
+       return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
+                       HZ * pgrant_timeout);
 }
 
 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
@@ -234,7 +233,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
        struct persistent_gnt *this;
        struct xen_blkif *blkif = ring->blkif;
 
-       if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
+       if (ring->persistent_gnt_c >= max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
@@ -397,14 +396,13 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
                goto out;
        }
 
-       if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
-           (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+       if (ring->persistent_gnt_c < max_pgrants ||
+           (ring->persistent_gnt_c == max_pgrants &&
            !ring->blkif->vbd.overflow_max_grants)) {
                num_clean = 0;
        } else {
-               num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
-               num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
-                           num_clean;
+               num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
+               num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
                num_clean = min(ring->persistent_gnt_c, num_clean);
                pr_debug("Going to purge at least %u persistent grants\n",
                         num_clean);
@@ -599,8 +597,7 @@ static void print_stats(struct xen_blkif_ring *ring)
                 current->comm, ring->st_oo_req,
                 ring->st_rd_req, ring->st_wr_req,
                 ring->st_f_req, ring->st_ds_req,
-                ring->persistent_gnt_c,
-                xen_blkif_max_pgrants);
+                ring->persistent_gnt_c, max_pgrants);
        ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        ring->st_rd_req = 0;
        ring->st_wr_req = 0;
@@ -656,8 +653,11 @@ purge_gnt_list:
                        ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }
 
-               /* Shrink if we have more than xen_blkif_max_buffer_pages */
-               shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
+               /* Shrink the free pages pool if it is too large. */
+               if (time_before(jiffies, blkif->buffer_squeeze_end))
+                       shrink_free_pagepool(ring, 0);
+               else
+                       shrink_free_pagepool(ring, max_buffer_pages);
 
                if (log_stats && time_after(jiffies, ring->st_print))
                        print_stats(ring);
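
As a reading aid, a hedged sketch of the squeeze-window decision the worker
loop above now makes (the names mirror the hunk; this is not part of the patch):

    #include <linux/jiffies.h>

    /* While jiffies is before the squeeze deadline the free page pool is
     * trimmed to zero; afterwards only down to the max_buffer_pages limit. */
    static unsigned int free_pool_target(unsigned long buffer_squeeze_end,
                                         unsigned int max_buffer_pages)
    {
            return time_before(jiffies, buffer_squeeze_end) ? 0 : max_buffer_pages;
    }
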
@@ -884,7 +884,7 @@ again:
                        continue;
                }
                if (use_persistent_gnts &&
-                   ring->persistent_gnt_c < xen_blkif_max_pgrants) {
+                   ring->persistent_gnt_c < max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
@@ -911,7 +911,7 @@ again:
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, ring->persistent_gnt_c,
-                                xen_blkif_max_pgrants);
+                                max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
index 49132b0..a3eeccf 100644 (file)
@@ -319,6 +319,7 @@ struct xen_blkif {
        /* All rings for this device. */
        struct xen_blkif_ring   *rings;
        unsigned int            nr_rings;
+       unsigned long           buffer_squeeze_end;
 };
 
 struct seg_buf {
index 4c5d99f..42944d4 100644 (file)
@@ -467,7 +467,6 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
        device_remove_file(&dev->dev, &dev_attr_physical_device);
 }
 
-
 static void xen_vbd_free(struct xen_vbd *vbd)
 {
        if (vbd->bdev)
@@ -524,6 +523,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
                handle, blkif->domid);
        return 0;
 }
+
 static int xen_blkbk_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -607,6 +607,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
        if (err)
                dev_warn(&dev->dev, "writing feature-discard (%d)", err);
 }
+
 int xen_blkbk_barrier(struct xenbus_transaction xbt,
                      struct backend_info *be, int state)
 {
@@ -691,7 +692,6 @@ fail:
        return err;
 }
 
-
 /*
  * Callback received when the hotplug scripts have placed the physical-device
  * node.  Read it and the mode node, and create a vbd.  If the frontend is
@@ -783,7 +783,6 @@ static void backend_changed(struct xenbus_watch *watch,
        }
 }
 
-
 /*
  * Callback received when the frontend's state changes.
  */
@@ -858,9 +857,27 @@ static void frontend_changed(struct xenbus_device *dev,
        }
 }
 
+/* Once memory pressure is detected, squeeze free page pools for a while. */
+static unsigned int buffer_squeeze_duration_ms = 10;
+module_param_named(buffer_squeeze_duration_ms,
+               buffer_squeeze_duration_ms, int, 0644);
+MODULE_PARM_DESC(buffer_squeeze_duration_ms,
+"Duration in ms to squeeze pages buffer when a memory pressure is detected");
 
-/* ** Connection ** */
+/*
+ * Callback invoked when memory pressure is detected.
+ */
+static void reclaim_memory(struct xenbus_device *dev)
+{
+       struct backend_info *be = dev_get_drvdata(&dev->dev);
 
+       if (!be)
+               return;
+       be->blkif->buffer_squeeze_end = jiffies +
+               msecs_to_jiffies(buffer_squeeze_duration_ms);
+}
+
+/* ** Connection ** */
 
 /*
  * Write the physical details regarding the block device to the store, and
@@ -1152,6 +1169,7 @@ static struct xenbus_driver xen_blkbk_driver = {
        .remove = xen_blkbk_remove,
        .otherend_changed = frontend_changed,
        .allow_rebind = true,
+       .reclaim_memory = reclaim_memory,
 };
 
 int xen_blkif_xenbus_init(void)
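
The new callback is hooked up through the xenbus_driver's .reclaim_memory
member, which is introduced elsewhere in this series. A hedged sketch of the
pattern a backend follows, with hypothetical my_* names:

    #include <linux/jiffies.h>
    #include <xen/xenbus.h>

    struct my_backend {                       /* hypothetical backend state */
            unsigned long squeeze_end;
    };

    static void my_reclaim_memory(struct xenbus_device *dev)
    {
            struct my_backend *be = dev_get_drvdata(&dev->dev);

            if (!be)
                    return;
            /* Arm a squeeze window instead of freeing pages synchronously;
             * the I/O worker checks it with time_before() on its next pass. */
            be->squeeze_end = jiffies + msecs_to_jiffies(10);
    }
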
index 57d50c5..e2ad6bb 100644 (file)
@@ -151,9 +151,6 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
 #define BLK_RING_SIZE(info)    \
        __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
-#define BLK_MAX_RING_SIZE      \
-       __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
-
 /*
  * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
  * characters are enough. Define to 20 to keep consistent with backend.
@@ -177,12 +174,12 @@ struct blkfront_ring_info {
        unsigned int evtchn, irq;
        struct work_struct work;
        struct gnttab_free_callback callback;
-       struct blk_shadow shadow[BLK_MAX_RING_SIZE];
        struct list_head indirect_pages;
        struct list_head grants;
        unsigned int persistent_gnts_c;
        unsigned long shadow_free;
        struct blkfront_info *dev_info;
+       struct blk_shadow shadow[];
 };
 
 /*
@@ -1915,7 +1912,8 @@ static int negotiate_mq(struct blkfront_info *info)
                info->nr_rings = 1;
 
        info->rinfo = kvcalloc(info->nr_rings,
-                              sizeof(struct blkfront_ring_info),
+                              struct_size(info->rinfo, shadow,
+                                          BLK_RING_SIZE(info)),
                               GFP_KERNEL);
        if (!info->rinfo) {
                xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
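
A hedged sketch of the flexible-array plus struct_size() allocation pattern the
hunk switches to, using a generic my_ring type rather than the real blkfront
structures:

    #include <linux/mm.h>
    #include <linux/overflow.h>

    struct my_ring {
            unsigned int nr_entries;
            unsigned long shadow[];      /* flexible array member, last field */
    };

    static struct my_ring *my_rings_alloc(unsigned int nr_rings,
                                          unsigned int ring_size)
    {
            struct my_ring *rings;

            /* nr_rings structs, each followed by ring_size shadow entries;
             * struct_size() checks the per-element size for overflow. */
            rings = kvcalloc(nr_rings, struct_size(rings, shadow, ring_size),
                             GFP_KERNEL);
            return rings;
    }
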
index d2a5791..cbf5eae 100644 (file)
@@ -157,7 +157,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 
        /* Clock is optional on most platforms */
        priv->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+       if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        priv->rng.name = pdev->name;
index 0ed07d1..6595239 100644 (file)
@@ -476,7 +476,7 @@ static int omap_rng_probe(struct platform_device *pdev)
        }
 
        priv->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+       if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (!IS_ERR(priv->clk)) {
                ret = clk_prepare_enable(priv->clk);
@@ -488,7 +488,7 @@ static int omap_rng_probe(struct platform_device *pdev)
        }
 
        priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
-       if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
+       if (PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (!IS_ERR(priv->clk_reg)) {
                ret = clk_prepare_enable(priv->clk_reg);
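
Both hwrng hunks rely on the same observation: PTR_ERR() of a pointer that is
not an error value cannot equal -EPROBE_DEFER, so the IS_ERR() guard is
redundant. A hedged sketch of the resulting optional-clock idiom
(devm_clk_get_optional() is an existing alternative that returns NULL when no
clock is described):

    #include <linux/clk.h>
    #include <linux/err.h>

    static int my_get_optional_clk(struct device *dev, struct clk **out)
    {
            struct clk *clk = devm_clk_get(dev, NULL);

            if (PTR_ERR(clk) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;     /* provider not bound yet */
            if (IS_ERR(clk))
                    clk = NULL;               /* clock is genuinely optional */

            *out = clk;
            return 0;
    }
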
index ac5981c..bcb257b 100644 (file)
@@ -27,7 +27,7 @@ config COMMON_CLK_WM831X
        tristate "Clock driver for WM831x/2x PMICs"
        depends on MFD_WM831X
        ---help---
-          Supports the clocking subsystem of the WM831x/2x series of
+         Supports the clocking subsystem of the WM831x/2x series of
          PMICs from Wolfson Microelectronics.
 
 source "drivers/clk/versatile/Kconfig"
@@ -174,6 +174,18 @@ config COMMON_CLK_CS2000_CP
        help
          If you say yes here you get support for the CS2000 clock multiplier.
 
+config COMMON_CLK_FSL_SAI
+       bool "Clock driver for BCLK of Freescale SAI cores"
+       depends on ARCH_LAYERSCAPE || COMPILE_TEST
+       help
+         This driver supports the Freescale SAI (Synchronous Audio Interface)
+         to be used as a generic clock output. Some SoCs have restrictions
+         regarding the possible pin multiplexer settings. E.g. on some SoCs
+         two SAI interfaces can only be enabled together. If just one is
+         needed, the BCLK pin of the second one can be used as general
+         purpose clock output. Ideally, it can be used to drive an audio
+         codec (sometimes known as MCLK).
+
 config COMMON_CLK_GEMINI
        bool "Clock driver for Cortina Systems Gemini SoC"
        depends on ARCH_GEMINI || COMPILE_TEST
@@ -225,6 +237,16 @@ config CLK_QORIQ
          This adds the clock driver support for Freescale QorIQ platforms
          using common clock framework.
 
+config CLK_LS1028A_PLLDIG
+        tristate "Clock driver for LS1028A Display output"
+        depends on ARCH_LAYERSCAPE || COMPILE_TEST
+        default ARCH_LAYERSCAPE
+        help
+          This driver supports the pixel clocks of the Display output interfaces
+          (LCD, DPHY) on the QorIQ Layerscape LS1028A, implemented with a TSMC
+          CLN28HPM PLL. Not all features of the PLL are currently supported by
+          the driver. The PLL is configured in bypass mode by default.
+
 config COMMON_CLK_XGENE
        bool "Clock driver for APM XGene SoC"
        default ARCH_XGENE
index 0696a0c..f4169cc 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_CLPS711X)           += clk-clps711x.o
 obj-$(CONFIG_COMMON_CLK_CS2000_CP)     += clk-cs2000-cp.o
 obj-$(CONFIG_ARCH_EFM32)               += clk-efm32gg.o
 obj-$(CONFIG_COMMON_CLK_FIXED_MMIO)    += clk-fixed-mmio.o
+obj-$(CONFIG_COMMON_CLK_FSL_SAI)       += clk-fsl-sai.o
 obj-$(CONFIG_COMMON_CLK_GEMINI)                += clk-gemini.o
 obj-$(CONFIG_COMMON_CLK_ASPEED)                += clk-aspeed.o
 obj-$(CONFIG_MACH_ASPEED_G6)           += clk-ast2600.o
@@ -44,6 +45,7 @@ obj-$(CONFIG_ARCH_NPCM7XX)            += clk-npcm7xx.o
 obj-$(CONFIG_ARCH_NSPIRE)              += clk-nspire.o
 obj-$(CONFIG_COMMON_CLK_OXNAS)         += clk-oxnas.o
 obj-$(CONFIG_COMMON_CLK_PALMAS)                += clk-palmas.o
+obj-$(CONFIG_CLK_LS1028A_PLLDIG)       += clk-plldig.o
 obj-$(CONFIG_COMMON_CLK_PWM)           += clk-pwm.o
 obj-$(CONFIG_CLK_QORIQ)                        += clk-qoriq.o
 obj-$(CONFIG_COMMON_CLK_RK808)         += clk-rk808.o
index 34b8178..dfb354a 100644 (file)
@@ -25,7 +25,8 @@
 #define                PMC_PLL_CTRL1_MUL_MSK           GENMASK(30, 24)
 
 #define PMC_PLL_ACR    0x18
-#define                PMC_PLL_ACR_DEFAULT             0x1b040010UL
+#define                PMC_PLL_ACR_DEFAULT_UPLL        0x12020010UL
+#define                PMC_PLL_ACR_DEFAULT_PLLA        0x00020010UL
 #define                PMC_PLL_ACR_UTMIVR              BIT(12)
 #define                PMC_PLL_ACR_UTMIBG              BIT(13)
 #define                PMC_PLL_ACR_LOOP_FILTER_MSK     GENMASK(31, 24)
@@ -88,7 +89,10 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
        }
 
        /* Recommended value for PMC_PLL_ACR */
-       val = PMC_PLL_ACR_DEFAULT;
+       if (pll->characteristics->upll)
+               val = PMC_PLL_ACR_DEFAULT_UPLL;
+       else
+               val = PMC_PLL_ACR_DEFAULT_PLLA;
        regmap_write(regmap, PMC_PLL_ACR, val);
 
        regmap_write(regmap, PMC_PLL_CTRL1,
index 86238d5..77398ae 100644 (file)
@@ -47,6 +47,7 @@ static const struct clk_programmable_layout sam9x60_programmable_layout = {
        .pres_shift = 8,
        .css_mask = 0x1f,
        .have_slck_mck = 0,
+       .is_pres_direct = 1,
 };
 
 static const struct clk_pcr_layout sam9x60_pcr_layout = {
index dd0f90c..536b59a 100644 (file)
@@ -260,7 +260,6 @@ static void __init asm9260_acc_init(struct device_node *np)
        const char *ref_clk, *pll_clk = "pll";
        u32 rate;
        int n;
-       u32 accuracy = 0;
 
        clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
        if (!clk_data)
@@ -275,10 +274,11 @@ static void __init asm9260_acc_init(struct device_node *np)
        /* register pll */
        rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
 
+       /* TODO: Convert to DT parent scheme */
        ref_clk = of_clk_get_parent_name(np, 0);
-       accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
-       hw = clk_hw_register_fixed_rate_with_accuracy(NULL, pll_clk,
-                       ref_clk, 0, rate, accuracy);
+       hw = __clk_hw_register_fixed_rate_with_accuracy(NULL, NULL, pll_clk,
+                       ref_clk, NULL, NULL, 0, rate, 0,
+                       CLK_FIXED_RATE_PARENT_ACCURACY);
 
        if (IS_ERR(hw))
                panic("%pOFn: can't register REFCLK. Check DT!", np);
index 4cd175a..e6d6599 100644 (file)
@@ -474,11 +474,10 @@ static struct bm1880_composite_clock bm1880_composite_clks[] = {
 static unsigned long bm1880_pll_rate_calc(u32 regval, unsigned long parent_rate)
 {
        u64 numerator;
-       u32 fbdiv, fref, refdiv;
+       u32 fbdiv, refdiv;
        u32 postdiv1, postdiv2, denominator;
 
        fbdiv = (regval >> 16) & 0xfff;
-       fref = parent_rate;
        refdiv = regval & 0x1f;
        postdiv1 = (regval >> 8) & 0x7;
        postdiv2 = (regval >> 12) & 0x7;
index 3e9c3e6..7376f57 100644 (file)
@@ -199,8 +199,9 @@ static void clk_composite_disable(struct clk_hw *hw)
        gate_ops->disable(gate_hw);
 }
 
-struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
-                       const char * const *parent_names, int num_parents,
+static struct clk_hw *__clk_hw_register_composite(struct device *dev,
+                       const char *name, const char * const *parent_names,
+                       const struct clk_parent_data *pdata, int num_parents,
                        struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
                        struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
                        struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -218,7 +219,10 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
 
        init.name = name;
        init.flags = flags;
-       init.parent_names = parent_names;
+       if (parent_names)
+               init.parent_names = parent_names;
+       else
+               init.parent_data = pdata;
        init.num_parents = num_parents;
        hw = &composite->hw;
 
@@ -312,6 +316,34 @@ err:
        return hw;
 }
 
+struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
+                       const char * const *parent_names, int num_parents,
+                       struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+                       struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+                       struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+                       unsigned long flags)
+{
+       return __clk_hw_register_composite(dev, name, parent_names, NULL,
+                                          num_parents, mux_hw, mux_ops,
+                                          rate_hw, rate_ops, gate_hw,
+                                          gate_ops, flags);
+}
+
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+                       const char *name,
+                       const struct clk_parent_data *parent_data,
+                       int num_parents,
+                       struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+                       struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+                       struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+                       unsigned long flags)
+{
+       return __clk_hw_register_composite(dev, name, NULL, parent_data,
+                                          num_parents, mux_hw, mux_ops,
+                                          rate_hw, rate_ops, gate_hw,
+                                          gate_ops, flags);
+}
+
 struct clk *clk_register_composite(struct device *dev, const char *name,
                        const char * const *parent_names, int num_parents,
                        struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
@@ -329,6 +361,24 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
        return hw->clk;
 }
 
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+                       const struct clk_parent_data *parent_data,
+                       int num_parents,
+                       struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+                       struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+                       struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+                       unsigned long flags)
+{
+       struct clk_hw *hw;
+
+       hw = clk_hw_register_composite_pdata(dev, name, parent_data,
+                       num_parents, mux_hw, mux_ops, rate_hw, rate_ops,
+                       gate_hw, gate_ops, flags);
+       if (IS_ERR(hw))
+               return ERR_CAST(hw);
+       return hw->clk;
+}
+
 void clk_unregister_composite(struct clk *clk)
 {
        struct clk_composite *composite;
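
A hedged usage sketch of the new *_pdata variant: a divider-plus-gate composite
whose single parent is referenced by its index in the consumer's "clocks"
property, mirroring how the fsl-sai driver added later in this merge uses it
(the my_* names are hypothetical):

    #include <linux/clk-provider.h>

    static struct clk_hw *my_register_composite(struct device *dev,
                                                struct clk_divider *div,
                                                struct clk_gate *gate)
    {
            const struct clk_parent_data pdata = { .index = 0 };

            return clk_hw_register_composite_pdata(dev, "my-bclk", &pdata, 1,
                                                   NULL, NULL,       /* no mux */
                                                   &div->hw, &clk_divider_ops,
                                                   &gate->hw, &clk_gate_ops,
                                                   CLK_SET_RATE_GATE);
    }
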
index 098b2b0..8de12cb 100644 (file)
@@ -463,11 +463,12 @@ const struct clk_ops clk_divider_ro_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
 
-static struct clk_hw *_register_divider(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_divider_flags, const struct clk_div_table *table,
-               spinlock_t *lock)
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+               struct device_node *np, const char *name,
+               const char *parent_name, const struct clk_hw *parent_hw,
+               const struct clk_parent_data *parent_data, unsigned long flags,
+               void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+               const struct clk_div_table *table, spinlock_t *lock)
 {
        struct clk_divider *div;
        struct clk_hw *hw;
@@ -514,55 +515,7 @@ static struct clk_hw *_register_divider(struct device *dev, const char *name,
 
        return hw;
 }
-
-/**
- * clk_register_divider - register a divider clock with the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk *clk_register_divider(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_divider_flags, spinlock_t *lock)
-{
-       struct clk_hw *hw;
-
-       hw =  _register_divider(dev, name, parent_name, flags, reg, shift,
-                       width, clk_divider_flags, NULL, lock);
-       if (IS_ERR(hw))
-               return ERR_CAST(hw);
-       return hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_divider);
-
-/**
- * clk_hw_register_divider - register a divider clock with the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_divider_flags, spinlock_t *lock)
-{
-       return _register_divider(dev, name, parent_name, flags, reg, shift,
-                       width, clk_divider_flags, NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_divider);
+EXPORT_SYMBOL_GPL(__clk_hw_register_divider);
 
 /**
  * clk_register_divider_table - register a table based divider clock with
@@ -586,39 +539,15 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
 {
        struct clk_hw *hw;
 
-       hw =  _register_divider(dev, name, parent_name, flags, reg, shift,
-                       width, clk_divider_flags, table, lock);
+       hw =  __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
+                       NULL, flags, reg, shift, width, clk_divider_flags,
+                       table, lock);
        if (IS_ERR(hw))
                return ERR_CAST(hw);
        return hw->clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_divider_table);
 
-/**
- * clk_hw_register_divider_table - register a table based divider clock with
- * the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @table: array of divider/value pairs ending with a div set to 0
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
-               const char *name, const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_divider_flags, const struct clk_div_table *table,
-               spinlock_t *lock)
-{
-       return _register_divider(dev, name, parent_name, flags, reg, shift,
-                       width, clk_divider_flags, table, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_divider_table);
-
 void clk_unregister_divider(struct clk *clk)
 {
        struct clk_divider *div;
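
Callers are unaffected by this consolidation; the removed
clk_hw_register_divider() and clk_hw_register_divider_table() helpers are
expected to live on as clk-provider.h wrappers around
__clk_hw_register_divider(). A hedged sketch of a typical caller:

    #include <linux/clk-provider.h>

    static struct clk_hw *my_register_div(struct device *dev, void __iomem *reg,
                                          spinlock_t *lock)
    {
            /* 4-bit divider field starting at bit 0 of *reg. */
            return clk_hw_register_divider(dev, "my-div", "my-parent", 0,
                                           reg, 0, 4, 0, lock);
    }
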
index 2c4486c..77499a2 100644 (file)
@@ -24,6 +24,8 @@
  * parent - fixed parent.  No clk_set_parent support
  */
 
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
 static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
@@ -33,7 +35,12 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
 static unsigned long clk_fixed_rate_recalc_accuracy(struct clk_hw *hw,
                unsigned long parent_accuracy)
 {
-       return to_clk_fixed_rate(hw)->fixed_accuracy;
+       struct clk_fixed_rate *fixed = to_clk_fixed_rate(hw);
+
+       if (fixed->flags & CLK_FIXED_RATE_PARENT_ACCURACY)
+               return parent_accuracy;
+
+       return fixed->fixed_accuracy;
 }
 
 const struct clk_ops clk_fixed_rate_ops = {
@@ -42,24 +49,17 @@ const struct clk_ops clk_fixed_rate_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
 
-/**
- * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
- * the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- * @fixed_accuracy: non-adjustable clock rate
- */
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
-               const char *name, const char *parent_name, unsigned long flags,
-               unsigned long fixed_rate, unsigned long fixed_accuracy)
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+               struct device_node *np, const char *name,
+               const char *parent_name, const struct clk_hw *parent_hw,
+               const struct clk_parent_data *parent_data, unsigned long flags,
+               unsigned long fixed_rate, unsigned long fixed_accuracy,
+               unsigned long clk_fixed_flags)
 {
        struct clk_fixed_rate *fixed;
        struct clk_hw *hw;
        struct clk_init_data init = {};
-       int ret;
+       int ret = -EINVAL;
 
        /* allocate fixed-rate clock */
        fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
@@ -69,17 +69,26 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
        init.name = name;
        init.ops = &clk_fixed_rate_ops;
        init.flags = flags;
-       init.parent_names = (parent_name ? &parent_name: NULL);
-       init.num_parents = (parent_name ? 1 : 0);
+       init.parent_names = parent_name ? &parent_name : NULL;
+       init.parent_hws = parent_hw ? &parent_hw : NULL;
+       init.parent_data = parent_data;
+       if (parent_name || parent_hw || parent_data)
+               init.num_parents = 1;
+       else
+               init.num_parents = 0;
 
        /* struct clk_fixed_rate assignments */
+       fixed->flags = clk_fixed_flags;
        fixed->fixed_rate = fixed_rate;
        fixed->fixed_accuracy = fixed_accuracy;
        fixed->hw.init = &init;
 
        /* register the clock */
        hw = &fixed->hw;
-       ret = clk_hw_register(dev, hw);
+       if (dev || !np)
+               ret = clk_hw_register(dev, hw);
+       else if (np)
+               ret = of_clk_hw_register(np, hw);
        if (ret) {
                kfree(fixed);
                hw = ERR_PTR(ret);
@@ -87,47 +96,20 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
 
        return hw;
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate_with_accuracy);
+EXPORT_SYMBOL_GPL(__clk_hw_register_fixed_rate);
 
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
-               const char *name, const char *parent_name, unsigned long flags,
-               unsigned long fixed_rate, unsigned long fixed_accuracy)
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+               const char *parent_name, unsigned long flags,
+               unsigned long fixed_rate)
 {
        struct clk_hw *hw;
 
        hw = clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
-                       flags, fixed_rate, fixed_accuracy);
+                                                     flags, fixed_rate, 0);
        if (IS_ERR(hw))
                return ERR_CAST(hw);
        return hw->clk;
 }
-EXPORT_SYMBOL_GPL(clk_register_fixed_rate_with_accuracy);
-
-/**
- * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
- * framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- */
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               unsigned long fixed_rate)
-{
-       return clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
-                                                    flags, fixed_rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate);
-
-struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               unsigned long fixed_rate)
-{
-       return clk_register_fixed_rate_with_accuracy(dev, name, parent_name,
-                                                    flags, fixed_rate, 0);
-}
 EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
 
 void clk_unregister_fixed_rate(struct clk *clk)
@@ -155,9 +137,9 @@ void clk_hw_unregister_fixed_rate(struct clk_hw *hw)
 EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_rate);
 
 #ifdef CONFIG_OF
-static struct clk *_of_fixed_clk_setup(struct device_node *node)
+static struct clk_hw *_of_fixed_clk_setup(struct device_node *node)
 {
-       struct clk *clk;
+       struct clk_hw *hw;
        const char *clk_name = node->name;
        u32 rate;
        u32 accuracy = 0;
@@ -170,18 +152,18 @@ static struct clk *_of_fixed_clk_setup(struct device_node *node)
 
        of_property_read_string(node, "clock-output-names", &clk_name);
 
-       clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
+       hw = clk_hw_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
                                                    0, rate, accuracy);
-       if (IS_ERR(clk))
-               return clk;
+       if (IS_ERR(hw))
+               return hw;
 
-       ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
        if (ret) {
-               clk_unregister(clk);
+               clk_hw_unregister_fixed_rate(hw);
                return ERR_PTR(ret);
        }
 
-       return clk;
+       return hw;
 }
 
 /**
@@ -195,27 +177,27 @@ CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
 
 static int of_fixed_clk_remove(struct platform_device *pdev)
 {
-       struct clk *clk = platform_get_drvdata(pdev);
+       struct clk_hw *hw = platform_get_drvdata(pdev);
 
        of_clk_del_provider(pdev->dev.of_node);
-       clk_unregister_fixed_rate(clk);
+       clk_hw_unregister_fixed_rate(hw);
 
        return 0;
 }
 
 static int of_fixed_clk_probe(struct platform_device *pdev)
 {
-       struct clk *clk;
+       struct clk_hw *hw;
 
        /*
         * This function is not executed when of_fixed_clk_setup
         * succeeded.
         */
-       clk = _of_fixed_clk_setup(pdev->dev.of_node);
-       if (IS_ERR(clk))
-               return PTR_ERR(clk);
+       hw = _of_fixed_clk_setup(pdev->dev.of_node);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
 
-       platform_set_drvdata(pdev, clk);
+       platform_set_drvdata(pdev, hw);
 
        return 0;
 }
@@ -224,7 +206,6 @@ static const struct of_device_id of_fixed_clk_ids[] = {
        { .compatible = "fixed-clock" },
        { }
 };
-MODULE_DEVICE_TABLE(of, of_fixed_clk_ids);
 
 static struct platform_driver of_fixed_clk_driver = {
        .driver = {
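
The fixed-clock OF setup now deals in struct clk_hw end to end and registers a
hw-based provider. A hedged sketch of that pattern with a hypothetical 25 MHz
clock:

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static int my_fixed_clk_setup(struct device_node *np)
    {
            struct clk_hw *hw;

            hw = clk_hw_register_fixed_rate_with_accuracy(NULL, np->name, NULL,
                                                          0, 25000000, 0);
            if (IS_ERR(hw))
                    return PTR_ERR(hw);

            /* Expose the clk_hw directly instead of a struct clk. */
            return of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
    }
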
diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c
new file mode 100644 (file)
index 0000000..0221180
--- /dev/null
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale SAI BCLK as a generic clock driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#define I2S_CSR                0x00
+#define I2S_CR2                0x08
+#define CSR_BCE_BIT    28
+#define CR2_BCD                BIT(24)
+#define CR2_DIV_SHIFT  0
+#define CR2_DIV_WIDTH  8
+
+struct fsl_sai_clk {
+       struct clk_divider div;
+       struct clk_gate gate;
+       spinlock_t lock;
+};
+
+static int fsl_sai_clk_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fsl_sai_clk *sai_clk;
+       struct clk_parent_data pdata = { .index = 0 };
+       void __iomem *base;
+       struct clk_hw *hw;
+       struct resource *res;
+
+       sai_clk = devm_kzalloc(dev, sizeof(*sai_clk), GFP_KERNEL);
+       if (!sai_clk)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       spin_lock_init(&sai_clk->lock);
+
+       sai_clk->gate.reg = base + I2S_CSR;
+       sai_clk->gate.bit_idx = CSR_BCE_BIT;
+       sai_clk->gate.lock = &sai_clk->lock;
+
+       sai_clk->div.reg = base + I2S_CR2;
+       sai_clk->div.shift = CR2_DIV_SHIFT;
+       sai_clk->div.width = CR2_DIV_WIDTH;
+       sai_clk->div.lock = &sai_clk->lock;
+
+       /* set clock direction, we are the BCLK master */
+       writel(CR2_BCD, base + I2S_CR2);
+
+       hw = clk_hw_register_composite_pdata(dev, dev->of_node->name,
+                                            &pdata, 1, NULL, NULL,
+                                            &sai_clk->div.hw,
+                                            &clk_divider_ops,
+                                            &sai_clk->gate.hw,
+                                            &clk_gate_ops,
+                                            CLK_SET_RATE_GATE);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static const struct of_device_id of_fsl_sai_clk_ids[] = {
+       { .compatible = "fsl,vf610-sai-clock" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, of_fsl_sai_clk_ids);
+
+static struct platform_driver fsl_sai_clk_driver = {
+       .probe = fsl_sai_clk_probe,
+       .driver         = {
+               .name   = "fsl-sai-clk",
+               .of_match_table = of_fsl_sai_clk_ids,
+       },
+};
+module_platform_driver(fsl_sai_clk_driver);
+
+MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fsl-sai-clk");
index 670053c..2ca1f2a 100644 (file)
@@ -123,26 +123,18 @@ const struct clk_ops clk_gate_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_gate_ops);
 
-/**
- * clk_hw_register_gate - register a gate clock with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @flags: framework-specific flags for this clock
- * @reg: register address to control gating of this clock
- * @bit_idx: which bit in the register controls gating of this clock
- * @clk_gate_flags: gate-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+               struct device_node *np, const char *name,
+               const char *parent_name, const struct clk_hw *parent_hw,
+               const struct clk_parent_data *parent_data,
+               unsigned long flags,
                void __iomem *reg, u8 bit_idx,
                u8 clk_gate_flags, spinlock_t *lock)
 {
        struct clk_gate *gate;
        struct clk_hw *hw;
        struct clk_init_data init = {};
-       int ret;
+       int ret = -EINVAL;
 
        if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
                if (bit_idx > 15) {
@@ -160,7 +152,12 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
        init.ops = &clk_gate_ops;
        init.flags = flags;
        init.parent_names = parent_name ? &parent_name : NULL;
-       init.num_parents = parent_name ? 1 : 0;
+       init.parent_hws = parent_hw ? &parent_hw : NULL;
+       init.parent_data = parent_data;
+       if (parent_name || parent_hw || parent_data)
+               init.num_parents = 1;
+       else
+               init.num_parents = 0;
 
        /* struct clk_gate assignments */
        gate->reg = reg;
@@ -170,15 +167,19 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
        gate->hw.init = &init;
 
        hw = &gate->hw;
-       ret = clk_hw_register(dev, hw);
+       if (dev || !np)
+               ret = clk_hw_register(dev, hw);
+       else if (np)
+               ret = of_clk_hw_register(np, hw);
        if (ret) {
                kfree(gate);
                hw = ERR_PTR(ret);
        }
 
        return hw;
+
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_gate);
+EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
 
 struct clk *clk_register_gate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
index 13304cf..70397b4 100644 (file)
  * parent - fixed parent.  No clk_set_parent support
  */
 
+/**
+ * struct clk_gpio - gpio gated clock
+ *
+ * @hw:                handle between common and hardware-specific interfaces
+ * @gpiod:     gpio descriptor
+ *
+ * Clock with a gpio control for enabling and disabling the parent clock
+ * or switching between two parents by asserting or deasserting the gpio.
+ *
+ * Implements .enable, .disable and .is_enabled or
+ * .get_parent, .set_parent and .determine_rate depending on which clk_ops
+ * is used.
+ */
+struct clk_gpio {
+       struct clk_hw   hw;
+       struct gpio_desc *gpiod;
+};
+
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
 static int clk_gpio_gate_enable(struct clk_hw *hw)
 {
        struct clk_gpio *clk = to_clk_gpio(hw);
@@ -51,12 +71,11 @@ static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
        return gpiod_get_value(clk->gpiod);
 }
 
-const struct clk_ops clk_gpio_gate_ops = {
+static const struct clk_ops clk_gpio_gate_ops = {
        .enable = clk_gpio_gate_enable,
        .disable = clk_gpio_gate_disable,
        .is_enabled = clk_gpio_gate_is_enabled,
 };
-EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
 
 static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw)
 {
@@ -111,67 +130,49 @@ static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
        return 0;
 }
 
-const struct clk_ops clk_gpio_mux_ops = {
+static const struct clk_ops clk_gpio_mux_ops = {
        .get_parent = clk_gpio_mux_get_parent,
        .set_parent = clk_gpio_mux_set_parent,
        .determine_rate = __clk_mux_determine_rate,
 };
-EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
 
-static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-               unsigned long flags, const struct clk_ops *clk_gpio_ops)
+static struct clk_hw *clk_register_gpio(struct device *dev, u8 num_parents,
+                                       struct gpio_desc *gpiod,
+                                       const struct clk_ops *clk_gpio_ops)
 {
        struct clk_gpio *clk_gpio;
        struct clk_hw *hw;
        struct clk_init_data init = {};
        int err;
+       const struct clk_parent_data gpio_parent_data[] = {
+               { .index = 0 },
+               { .index = 1 },
+       };
 
-       if (dev)
-               clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio), GFP_KERNEL);
-       else
-               clk_gpio = kzalloc(sizeof(*clk_gpio), GFP_KERNEL);
-
+       clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio), GFP_KERNEL);
        if (!clk_gpio)
                return ERR_PTR(-ENOMEM);
 
-       init.name = name;
+       init.name = dev->of_node->name;
        init.ops = clk_gpio_ops;
-       init.flags = flags;
-       init.parent_names = parent_names;
+       init.parent_data = gpio_parent_data;
        init.num_parents = num_parents;
+       init.flags = CLK_SET_RATE_PARENT;
 
        clk_gpio->gpiod = gpiod;
        clk_gpio->hw.init = &init;
 
        hw = &clk_gpio->hw;
-       if (dev)
-               err = devm_clk_hw_register(dev, hw);
-       else
-               err = clk_hw_register(NULL, hw);
-
-       if (!err)
-               return hw;
-
-       if (!dev) {
-               kfree(clk_gpio);
-       }
+       err = devm_clk_hw_register(dev, hw);
+       if (err)
+               return ERR_PTR(err);
 
-       return ERR_PTR(err);
+       return hw;
 }
 
-/**
- * clk_hw_register_gpio_gate - register a gpio clock gate with the clock
- * framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @gpiod: gpio descriptor to gate this clock
- * @flags: clock flags
- */
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, struct gpio_desc *gpiod,
-               unsigned long flags)
+static struct clk_hw *clk_hw_register_gpio_gate(struct device *dev,
+                                               int num_parents,
+                                               struct gpio_desc *gpiod)
 {
        const struct clk_ops *ops;
 
@@ -180,88 +181,36 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
        else
                ops = &clk_gpio_gate_ops;
 
-       return clk_register_gpio(dev, name,
-                       (parent_name ? &parent_name : NULL),
-                       (parent_name ? 1 : 0), gpiod, flags, ops);
+       return clk_register_gpio(dev, num_parents, gpiod, ops);
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
 
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, struct gpio_desc *gpiod,
-               unsigned long flags)
+static struct clk_hw *clk_hw_register_gpio_mux(struct device *dev,
+                                              struct gpio_desc *gpiod)
 {
-       struct clk_hw *hw;
-
-       hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpiod, flags);
-       if (IS_ERR(hw))
-               return ERR_CAST(hw);
-       return hw->clk;
+       return clk_register_gpio(dev, 2, gpiod, &clk_gpio_mux_ops);
 }
-EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
-
-/**
- * clk_hw_register_gpio_mux - register a gpio clock mux with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_names: names of this clock's parents
- * @num_parents: number of parents listed in @parent_names
- * @gpiod: gpio descriptor to gate this clock
- * @flags: clock flags
- */
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-               unsigned long flags)
-{
-       if (num_parents != 2) {
-               pr_err("mux-clock %s must have 2 parents\n", name);
-               return ERR_PTR(-EINVAL);
-       }
-
-       return clk_register_gpio(dev, name, parent_names, num_parents,
-                       gpiod, flags, &clk_gpio_mux_ops);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_gpio_mux);
-
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-               unsigned long flags)
-{
-       struct clk_hw *hw;
-
-       hw = clk_hw_register_gpio_mux(dev, name, parent_names, num_parents,
-                       gpiod, flags);
-       if (IS_ERR(hw))
-               return ERR_CAST(hw);
-       return hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
 
 static int gpio_clk_driver_probe(struct platform_device *pdev)
 {
-       struct device_node *node = pdev->dev.of_node;
-       const char **parent_names, *gpio_name;
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       const char *gpio_name;
        unsigned int num_parents;
        struct gpio_desc *gpiod;
-       struct clk *clk;
+       struct clk_hw *hw;
        bool is_mux;
        int ret;
 
+       is_mux = of_device_is_compatible(node, "gpio-mux-clock");
+
        num_parents = of_clk_get_parent_count(node);
-       if (num_parents) {
-               parent_names = devm_kcalloc(&pdev->dev, num_parents,
-                                           sizeof(char *), GFP_KERNEL);
-               if (!parent_names)
-                       return -ENOMEM;
-
-               of_clk_parent_fill(node, parent_names, num_parents);
-       } else {
-               parent_names = NULL;
+       if (is_mux && num_parents != 2) {
+               dev_err(dev, "mux-clock must have 2 parents\n");
+               return -EINVAL;
        }
 
-       is_mux = of_device_is_compatible(node, "gpio-mux-clock");
-
        gpio_name = is_mux ? "select" : "enable";
-       gpiod = devm_gpiod_get(&pdev->dev, gpio_name, GPIOD_OUT_LOW);
+       gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
        if (IS_ERR(gpiod)) {
                ret = PTR_ERR(gpiod);
                if (ret == -EPROBE_DEFER)
@@ -275,16 +224,13 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
        }
 
        if (is_mux)
-               clk = clk_register_gpio_mux(&pdev->dev, node->name,
-                               parent_names, num_parents, gpiod, 0);
+               hw = clk_hw_register_gpio_mux(dev, gpiod);
        else
-               clk = clk_register_gpio_gate(&pdev->dev, node->name,
-                               parent_names ?  parent_names[0] : NULL, gpiod,
-                               CLK_SET_RATE_PARENT);
-       if (IS_ERR(clk))
-               return PTR_ERR(clk);
+               hw = clk_hw_register_gpio_gate(dev, num_parents, gpiod);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
 
-       return of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
 }
 
 static const struct of_device_id gpio_clk_match_table[] = {
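
After the rewrite the gpio clock driver no longer collects parent name strings;
parents are described positionally through struct clk_parent_data. A hedged
sketch of that description for the two-parent mux case (the .ops assignment is
omitted because the mux ops above are now static to the driver):

    #include <linux/clk-provider.h>
    #include <linux/kernel.h>

    /* Parents are the first and second entries of the consumer node's
     * "clocks" property; no globally unique clock names are involved. */
    static const struct clk_parent_data my_gpio_mux_parents[] = {
            { .index = 0 },
            { .index = 1 },
    };

    static const struct clk_init_data my_gpio_mux_init = {
            .name = "my-gpio-mux",  /* the driver itself uses the DT node name */
            .parent_data = my_gpio_mux_parents,
            .num_parents = ARRAY_SIZE(my_gpio_mux_parents),
            .flags = CLK_SET_RATE_PARENT,
    };
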
index 570b6e5..e54e797 100644 (file)
@@ -145,17 +145,19 @@ const struct clk_ops clk_mux_ro_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
 
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u32 mask,
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+               const char *name, u8 num_parents,
+               const char * const *parent_names,
+               const struct clk_hw **parent_hws,
+               const struct clk_parent_data *parent_data,
+               unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock)
 {
        struct clk_mux *mux;
        struct clk_hw *hw;
        struct clk_init_data init = {};
        u8 width = 0;
-       int ret;
+       int ret = -EINVAL;
 
        if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
                width = fls(mask) - ffs(mask) + 1;
@@ -177,6 +179,8 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
                init.ops = &clk_mux_ops;
        init.flags = flags;
        init.parent_names = parent_names;
+       init.parent_data = parent_data;
+       init.parent_hws = parent_hws;
        init.num_parents = num_parents;
 
        /* struct clk_mux assignments */
@@ -189,7 +193,10 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
        mux->hw.init = &init;
 
        hw = &mux->hw;
-       ret = clk_hw_register(dev, hw);
+       if (dev || !np)
+               ret = clk_hw_register(dev, hw);
+       else if (np)
+               ret = of_clk_hw_register(np, hw);
        if (ret) {
                kfree(mux);
                hw = ERR_PTR(ret);
@@ -197,53 +204,24 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
 
        return hw;
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);
+EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
 
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
                const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u32 mask,
+               unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock)
 {
        struct clk_hw *hw;
 
-       hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
-                                      flags, reg, shift, mask, clk_mux_flags,
-                                      table, lock);
+       hw = clk_hw_register_mux_table(dev, name, parent_names,
+                                      num_parents, flags, reg, shift, mask,
+                                      clk_mux_flags, table, lock);
        if (IS_ERR(hw))
                return ERR_CAST(hw);
        return hw->clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_mux_table);
 
-struct clk *clk_register_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_mux_flags, spinlock_t *lock)
-{
-       u32 mask = BIT(width) - 1;
-
-       return clk_register_mux_table(dev, name, parent_names, num_parents,
-                                     flags, reg, shift, mask, clk_mux_flags,
-                                     NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_register_mux);
-
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_mux_flags, spinlock_t *lock)
-{
-       u32 mask = BIT(width) - 1;
-
-       return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
-                                     flags, reg, shift, mask, clk_mux_flags,
-                                     NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_mux);
-
 void clk_unregister_mux(struct clk *clk)
 {
        struct clk_mux *mux;
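
As with the divider, existing mux users keep calling the familiar helpers;
clk_hw_register_mux() and friends are expected to become wrappers around
__clk_hw_register_mux() with a BIT(width) - 1 mask. A hedged caller sketch:

    #include <linux/clk-provider.h>
    #include <linux/kernel.h>

    static struct clk_hw *my_register_mux(struct device *dev, void __iomem *reg,
                                          spinlock_t *lock)
    {
            static const char * const parents[] = { "osc", "pll" };

            /* 1-bit select field at bit 4 of *reg chooses between the parents. */
            return clk_hw_register_mux(dev, "my-mux", parents, ARRAY_SIZE(parents),
                                       0, reg, 4, 1, 0, lock);
    }
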
diff --git a/drivers/clk/clk-plldig.c b/drivers/clk/clk-plldig.c
new file mode 100644 (file)
index 0000000..312b831
--- /dev/null
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ *
+ * Clock driver for the LS1028A Display output interfaces (LCD, DPHY).
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+/* PLLDIG register offsets and bit masks */
+#define PLLDIG_REG_PLLSR            0x24
+#define PLLDIG_LOCK_MASK            BIT(2)
+#define PLLDIG_REG_PLLDV            0x28
+#define PLLDIG_MFD_MASK             GENMASK(7, 0)
+#define PLLDIG_RFDPHI1_MASK         GENMASK(30, 25)
+#define PLLDIG_REG_PLLFM            0x2c
+#define PLLDIG_SSCGBYP_ENABLE       BIT(30)
+#define PLLDIG_REG_PLLFD            0x30
+#define PLLDIG_FDEN                 BIT(30)
+#define PLLDIG_FRAC_MASK            GENMASK(15, 0)
+#define PLLDIG_REG_PLLCAL1          0x38
+#define PLLDIG_REG_PLLCAL2          0x3c
+
+/* Range of the VCO frequencies, in Hz */
+#define PLLDIG_MIN_VCO_FREQ         650000000
+#define PLLDIG_MAX_VCO_FREQ         1300000000
+
+/* Range of the output frequencies, in Hz */
+#define PHI1_MIN_FREQ               27000000UL
+#define PHI1_MAX_FREQ               600000000UL
+
+/* Maximum value of the reduced frequency divider */
+#define MAX_RFDPHI1          63UL
+
+/* Best value of multiplication factor divider */
+#define PLLDIG_DEFAULT_MFD   44
+
+/*
+ * Denominator part of the fractional part of the
+ * loop multiplication factor.
+ */
+#define MFDEN          20480
+
+static const struct clk_parent_data parent_data[] = {
+       { .index = 0 },
+};
+
+struct clk_plldig {
+       struct clk_hw hw;
+       void __iomem *regs;
+       unsigned int vco_freq;
+};
+
+#define to_clk_plldig(_hw)     container_of(_hw, struct clk_plldig, hw)
+
+static int plldig_enable(struct clk_hw *hw)
+{
+       struct clk_plldig *data = to_clk_plldig(hw);
+       u32 val;
+
+       val = readl(data->regs + PLLDIG_REG_PLLFM);
+       /*
+        * Use bypass mode with the PLL off by default; the frequency overshoot
+        * detector output is disabled. SSCG bypass mode should be enabled.
+        */
+       val |= PLLDIG_SSCGBYP_ENABLE;
+       writel(val, data->regs + PLLDIG_REG_PLLFM);
+
+       return 0;
+}
+
+static void plldig_disable(struct clk_hw *hw)
+{
+       struct clk_plldig *data = to_clk_plldig(hw);
+       u32 val;
+
+       val = readl(data->regs + PLLDIG_REG_PLLFM);
+
+       val &= ~PLLDIG_SSCGBYP_ENABLE;
+       val |= FIELD_PREP(PLLDIG_SSCGBYP_ENABLE, 0x0);
+
+       writel(val, data->regs + PLLDIG_REG_PLLFM);
+}
+
+static int plldig_is_enabled(struct clk_hw *hw)
+{
+       struct clk_plldig *data = to_clk_plldig(hw);
+
+       return readl(data->regs + PLLDIG_REG_PLLFM) &
+                             PLLDIG_SSCGBYP_ENABLE;
+}
+
+static unsigned long plldig_recalc_rate(struct clk_hw *hw,
+                                       unsigned long parent_rate)
+{
+       struct clk_plldig *data = to_clk_plldig(hw);
+       u32 val, rfdphi1;
+
+       val = readl(data->regs + PLLDIG_REG_PLLDV);
+
+       /* Check if PLL is bypassed */
+       if (val & PLLDIG_SSCGBYP_ENABLE)
+               return parent_rate;
+
+       rfdphi1 = FIELD_GET(PLLDIG_RFDPHI1_MASK, val);
+
+       /*
+        * An RFDPHI1 field value of 0 also means the VCO frequency is
+        * divided by one, so treat it the same as a value of 1.
+        */
+       if (!rfdphi1)
+               rfdphi1 = 1;
+
+       return DIV_ROUND_UP(data->vco_freq, rfdphi1);
+}
+
+static unsigned long plldig_calc_target_div(unsigned long vco_freq,
+                                           unsigned long target_rate)
+{
+       unsigned long div;
+
+       div = DIV_ROUND_CLOSEST(vco_freq, target_rate);
+       div = clamp(div, 1UL, MAX_RFDPHI1);
+
+       return div;
+}
+
+static int plldig_determine_rate(struct clk_hw *hw,
+                                struct clk_rate_request *req)
+{
+       struct clk_plldig *data = to_clk_plldig(hw);
+       unsigned int div;
+
+       req->rate = clamp(req->rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
+       div = plldig_calc_target_div(data->vco_freq, req->rate);
+       req->rate = DIV_ROUND_UP(data->vco_freq, div);
+
+       return 0;
+}
+
+static int plldig_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_plldig *data = to_clk_plldig(hw);
+       unsigned int val, cond;
+       unsigned int rfdphi1;
+
+       rate = clamp(rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
+       rfdphi1 = plldig_calc_target_div(data->vco_freq, rate);
+
+       /* update the divider value */
+       val = readl(data->regs + PLLDIG_REG_PLLDV);
+       val &= ~PLLDIG_RFDPHI1_MASK;
+       val |= FIELD_PREP(PLLDIG_RFDPHI1_MASK, rfdphi1);
+       writel(val, data->regs + PLLDIG_REG_PLLDV);
+
+       /* waiting for old lock state to clear */
+       udelay(200);
+
+       /* Wait until PLL is locked or timeout */
+       return readl_poll_timeout_atomic(data->regs + PLLDIG_REG_PLLSR, cond,
+                                        cond & PLLDIG_LOCK_MASK, 0,
+                                        USEC_PER_MSEC);
+}
+
+static const struct clk_ops plldig_clk_ops = {
+       .enable = plldig_enable,
+       .disable = plldig_disable,
+       .is_enabled = plldig_is_enabled,
+       .recalc_rate = plldig_recalc_rate,
+       .determine_rate = plldig_determine_rate,
+       .set_rate = plldig_set_rate,
+};
+
+static int plldig_init(struct clk_hw *hw)
+{
+       struct clk_plldig *data = to_clk_plldig(hw);
+       struct clk_hw *parent = clk_hw_get_parent(hw);
+       unsigned long parent_rate = clk_hw_get_rate(parent);
+       unsigned long val;
+       unsigned long long lltmp;
+       unsigned int mfd, fracdiv = 0;
+
+       if (!parent)
+               return -EINVAL;
+
+       if (data->vco_freq) {
+               mfd = data->vco_freq / parent_rate;
+               lltmp = data->vco_freq % parent_rate;
+               lltmp *= MFDEN;
+               do_div(lltmp, parent_rate);
+               fracdiv = lltmp;
+       } else {
+               mfd = PLLDIG_DEFAULT_MFD;
+               data->vco_freq = parent_rate * mfd;
+       }
+
+       val = FIELD_PREP(PLLDIG_MFD_MASK, mfd);
+       writel(val, data->regs + PLLDIG_REG_PLLDV);
+
+       /* Enable fractional divider */
+       if (fracdiv) {
+               val = FIELD_PREP(PLLDIG_FRAC_MASK, fracdiv);
+               val |= PLLDIG_FDEN;
+               writel(val, data->regs + PLLDIG_REG_PLLFD);
+       }
+
+       return 0;
+}
+
+static int plldig_clk_probe(struct platform_device *pdev)
+{
+       struct clk_plldig *data;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(data->regs))
+               return PTR_ERR(data->regs);
+
+       data->hw.init = CLK_HW_INIT_PARENTS_DATA("dpclk",
+                                                parent_data,
+                                                &plldig_clk_ops,
+                                                0);
+
+       ret = devm_clk_hw_register(dev, &data->hw);
+       if (ret) {
+               dev_err(dev, "failed to register %s clock\n",
+                                               dev->of_node->name);
+               return ret;
+       }
+
+       ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+                                         &data->hw);
+       if (ret) {
+               dev_err(dev, "unable to add clk provider\n");
+               return ret;
+       }
+
+       /*
+        * The VCO frequency cannot be changed at runtime, so let the user
+        * specify the desired frequency via the "fsl,vco-hz" property.
+        */
+       if (!of_property_read_u32(dev->of_node, "fsl,vco-hz",
+                                 &data->vco_freq)) {
+               if (data->vco_freq < PLLDIG_MIN_VCO_FREQ ||
+                   data->vco_freq > PLLDIG_MAX_VCO_FREQ)
+                       return -EINVAL;
+       }
+
+       return plldig_init(&data->hw);
+}
+
+static const struct of_device_id plldig_clk_id[] = {
+       { .compatible = "fsl,ls1028a-plldig" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, plldig_clk_id);
+
+static struct platform_driver plldig_clk_driver = {
+       .driver = {
+               .name = "plldig-clock",
+               .of_match_table = plldig_clk_id,
+       },
+       .probe = plldig_clk_probe,
+};
+module_platform_driver(plldig_clk_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wen He <wen.he_1@nxp.com>");
+MODULE_DESCRIPTION("LS1028A Display output interface pixel clock driver");
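For context only, not part of the patch: a device that consumes the "dpclk" output registered above would go through the generic clock consumer API. The sketch below is a minimal, hypothetical example; the "pixel" clock name and the foo_display_set_pixclk() helper are assumptions made purely for illustration.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical consumer sketch: request the pixel clock and program it as
 * close as possible to the requested pixel rate.
 */
static int foo_display_set_pixclk(struct device *dev, unsigned long pixclk_hz)
{
        struct clk *pixclk;
        long rounded;
        int ret;

        /* "pixel" as a clock-names entry is an assumption for this sketch */
        pixclk = devm_clk_get(dev, "pixel");
        if (IS_ERR(pixclk))
                return PTR_ERR(pixclk);

        /* plldig_determine_rate() clamps the request to the PHI1 range */
        rounded = clk_round_rate(pixclk, pixclk_hz);
        if (rounded < 0)
                return rounded;

        ret = clk_set_rate(pixclk, rounded);
        if (ret)
                return ret;

        return clk_prepare_enable(pixclk);
}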
index bed140f..d5946f7 100644 (file)
@@ -342,6 +342,32 @@ static const struct clockgen_muxinfo ls1046a_hwa2 = {
        },
 };
 
+static const struct clockgen_muxinfo ls1088a_hwa1 = {
+       {
+               {},
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+               {},
+               { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+               { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+       },
+};
+
+static const struct clockgen_muxinfo ls1088a_hwa2 = {
+       {
+               {},
+               { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+               { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+               { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+               { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+               {},
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+       },
+};
+
 static const struct clockgen_muxinfo ls1012a_cmux = {
        {
                [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
@@ -607,6 +633,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
                .cmux_groups = {
                        &clockgen2_cmux_cga12
                },
+               .hwaccel = {
+                       &ls1088a_hwa1, &ls1088a_hwa2
+               },
                .cmux_to_group = {
                        0, 0, -1
                },
index 772258d..f0f2b59 100644 (file)
@@ -429,7 +429,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
                        parent = ERR_PTR(-EPROBE_DEFER);
        } else {
                parent = clk_core_get(core, index);
-               if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
+               if (PTR_ERR(parent) == -ENOENT && entry->name)
                        parent = clk_core_lookup(entry->name);
        }
 
@@ -2996,6 +2996,41 @@ static int clk_dump_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(clk_dump);
 
+#undef CLOCK_ALLOW_WRITE_DEBUGFS
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+/*
+ * This can be dangerous, so no real compile-time configuration option is
+ * provided for this feature. Anyone who wants to use it must change the
+ * #undef above to #define and rebuild.
+ */
+static int clk_rate_set(void *data, u64 val)
+{
+       struct clk_core *core = data;
+       int ret;
+
+       clk_prepare_lock();
+       ret = clk_core_set_rate_nolock(core, val);
+       clk_prepare_unlock();
+
+       return ret;
+}
+
+#define clk_rate_mode  0644
+#else
+#define clk_rate_set   NULL
+#define clk_rate_mode  0444
+#endif
+
+static int clk_rate_get(void *data, u64 *val)
+{
+       struct clk_core *core = data;
+
+       *val = core->rate;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
+
 static const struct {
        unsigned long flag;
        const char *name;
@@ -3145,7 +3180,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
        root = debugfs_create_dir(core->name, pdentry);
        core->dentry = root;
 
-       debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
+       debugfs_create_file("clk_rate", clk_rate_mode, root, core,
+                           &clk_rate_fops);
        debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
        debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
        debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
@@ -3338,6 +3374,26 @@ static int __clk_core_init(struct clk_core *core)
                goto out;
        }
 
+       /*
+        * Optional platform-specific magic.
+        *
+        * The .init callback is not used by any of the basic clock types, but
+        * exists for weird hardware that must perform initialization magic so
+        * that CCF gets an accurate view of the clock for any other callback.
+        * It may also be used when dynamic resource allocation is required;
+        * any such allocation must be freed in the .terminate() callback.
+        * This callback shall not be used to initialize clock parameters such
+        * as rate or parent.
+        *
+        * If it exists, this callback is called before any other callback of
+        * the clock.
+        */
+       if (core->ops->init) {
+               ret = core->ops->init(core->hw);
+               if (ret)
+                       goto out;
+       }
+
        core->parent = __clk_init_parent(core);
 
        /*
@@ -3362,17 +3418,6 @@ static int __clk_core_init(struct clk_core *core)
                core->orphan = true;
        }
 
-       /*
-        * optional platform-specific magic
-        *
-        * The .init callback is not used by any of the basic clock types, but
-        * exists for weird hardware that must perform initialization magic.
-        * Please consider other ways of solving initialization problems before
-        * using this callback, as its use is discouraged.
-        */
-       if (core->ops->init)
-               core->ops->init(core->hw);
-
        /*
         * Set clk's accuracy.  The preferred method is to use
         * .recalc_accuracy. For simple clocks and lazy developers the default
@@ -3427,13 +3472,18 @@ static int __clk_core_init(struct clk_core *core)
                unsigned long flags;
 
                ret = clk_core_prepare(core);
-               if (ret)
+               if (ret) {
+                       pr_warn("%s: critical clk '%s' failed to prepare\n",
+                              __func__, core->name);
                        goto out;
+               }
 
                flags = clk_enable_lock();
                ret = clk_core_enable(core);
                clk_enable_unlock(flags);
                if (ret) {
+                       pr_warn("%s: critical clk '%s' failed to enable\n",
+                              __func__, core->name);
                        clk_core_unprepare(core);
                        goto out;
                }
@@ -3732,6 +3782,28 @@ fail_out:
        return ERR_PTR(ret);
 }
 
+/**
+ * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
+ * @dev: Device to get device node of
+ *
+ * Return: device node pointer of @dev, or the device node pointer of
+ * @dev->parent if @dev doesn't have a device node, or NULL if neither
+ * @dev nor @dev->parent has a device node.
+ */
+static struct device_node *dev_or_parent_of_node(struct device *dev)
+{
+       struct device_node *np;
+
+       if (!dev)
+               return NULL;
+
+       np = dev_of_node(dev);
+       if (!np)
+               np = dev_of_node(dev->parent);
+
+       return np;
+}
+
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
@@ -3747,7 +3819,7 @@ fail_out:
  */
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
-       return __clk_register(dev, dev_of_node(dev), hw);
+       return __clk_register(dev, dev_or_parent_of_node(dev), hw);
 }
 EXPORT_SYMBOL_GPL(clk_register);
 
@@ -3763,7 +3835,8 @@ EXPORT_SYMBOL_GPL(clk_register);
  */
 int clk_hw_register(struct device *dev, struct clk_hw *hw)
 {
-       return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
+       return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
+                              hw));
 }
 EXPORT_SYMBOL_GPL(clk_hw_register);
 
@@ -3866,6 +3939,7 @@ static void clk_core_evict_parent_cache(struct clk_core *core)
 void clk_unregister(struct clk *clk)
 {
        unsigned long flags;
+       const struct clk_ops *ops;
 
        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
                return;
@@ -3874,7 +3948,8 @@ void clk_unregister(struct clk *clk)
 
        clk_prepare_lock();
 
-       if (clk->core->ops == &clk_nodrv_ops) {
+       ops = clk->core->ops;
+       if (ops == &clk_nodrv_ops) {
                pr_err("%s: unregistered clock: %s\n", __func__,
                       clk->core->name);
                goto unlock;
@@ -3887,6 +3962,9 @@ void clk_unregister(struct clk *clk)
        clk->core->ops = &clk_nodrv_ops;
        clk_enable_unlock(flags);
 
+       if (ops->terminate)
+               ops->terminate(clk->core->hw);
+
        if (!hlist_empty(&clk->core->children)) {
                struct clk_core *child;
                struct hlist_node *t;
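For context only, not part of the patch: the .init/.terminate contract documented in the __clk_core_init() comment above can be illustrated with a minimal, hypothetical provider sketch. The foo_* names and the scratch buffer are assumptions for illustration: .init performs one-time setup and any dynamic allocation, .terminate frees it, and neither touches rate or parent state.

#include <linux/clk-provider.h>
#include <linux/slab.h>

struct foo_clk {
        struct clk_hw hw;
        void *scratch;          /* allocated in .init, freed in .terminate */
};

#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)

/* Called by CCF before any other callback of this clock */
static int foo_clk_init(struct clk_hw *hw)
{
        struct foo_clk *foo = to_foo_clk(hw);

        /* Dynamic allocation is allowed here ... */
        foo->scratch = kzalloc(64, GFP_KERNEL);
        if (!foo->scratch)
                return -ENOMEM;

        /* ... but rate/parent state must not be set up in .init */
        return 0;
}

/* Called from clk_unregister() once the clock is torn down */
static void foo_clk_terminate(struct clk_hw *hw)
{
        struct foo_clk *foo = to_foo_clk(hw);

        kfree(foo->scratch);
}

static const struct clk_ops foo_clk_ops = {
        .init           = foo_clk_init,
        .terminate      = foo_clk_terminate,
        /* .recalc_rate, .set_rate, etc. as usual */
};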
index 1ac0c79..01eadee 100644 (file)
@@ -20,6 +20,12 @@ config CLK_IMX8MN
        help
            Build the driver for i.MX8MN CCM Clock Driver
 
+config CLK_IMX8MP
+       bool "IMX8MP CCM Clock Driver"
+       depends on ARCH_MXC && ARM64
+       help
+           Build the driver for i.MX8MP CCM Clock Driver
+
 config CLK_IMX8MQ
        bool "IMX8MQ CCM Clock Driver"
        depends on ARCH_MXC && ARM64
index 77a3d71..928f874 100644 (file)
@@ -18,7 +18,7 @@ obj-$(CONFIG_MXC_CLK) += \
        clk-pllv2.o \
        clk-pllv3.o \
        clk-pllv4.o \
-       clk-sccg-pll.o \
+       clk-sscg-pll.o \
        clk-pll14xx.o
 
 obj-$(CONFIG_MXC_CLK_SCU) += \
@@ -27,6 +27,7 @@ obj-$(CONFIG_MXC_CLK_SCU) += \
 
 obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
 obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
+obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o
 obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
 obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
 
index 060f860..b9efcc8 100644 (file)
@@ -21,7 +21,7 @@
 #define PCG_PCD_WIDTH  3
 #define PCG_PCD_MASK   0x7
 
-struct clk_hw *imx7ulp_clk_composite(const char *name,
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
                                     const char * const *parent_names,
                                     int num_parents, bool mux_present,
                                     bool rate_present, bool gate_present,
index d3486ee..20f7c91 100644 (file)
@@ -123,7 +123,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
        .set_rate = imx8m_clk_composite_divider_set_rate,
 };
 
-struct clk *imx8m_clk_composite_flags(const char *name,
+struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
                                        const char * const *parent_names,
                                        int num_parents, void __iomem *reg,
                                        unsigned long flags)
@@ -171,7 +171,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
        if (IS_ERR(hw))
                goto fail;
 
-       return hw->clk;
+       return hw;
 
 fail:
        kfree(gate);
index 2a8352a..0322a84 100644 (file)
@@ -43,7 +43,7 @@ static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
 {
        struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
        struct clk_divider *div = to_clk_divider(hw);
-       unsigned long flags = 0;
+       unsigned long flags;
        unsigned int val;
 
        spin_lock_irqsave(div->lock, flags);
@@ -75,7 +75,7 @@ static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
 {
        struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
        struct clk_divider *div = to_clk_divider(hw);
-       unsigned long flags = 0;
+       unsigned long flags;
        int value;
        u32 val;
 
@@ -104,7 +104,7 @@ static int clk_divider_enable(struct clk_hw *hw)
 {
        struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
        struct clk_divider *div = to_clk_divider(hw);
-       unsigned long flags = 0;
+       unsigned long flags;
        u32 val;
 
        if (!div_gate->cached_val) {
@@ -127,7 +127,7 @@ static void clk_divider_disable(struct clk_hw *hw)
 {
        struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
        struct clk_divider *div = to_clk_divider(hw);
-       unsigned long flags = 0;
+       unsigned long flags;
        u32 val;
 
        spin_lock_irqsave(div->lock, flags);
@@ -167,13 +167,13 @@ static const struct clk_ops clk_divider_gate_ops = {
 };
 
 /*
- * NOTE: In order to resue the most code from the common divider,
+ * NOTE: In order to reuse most of the code from the common divider,
  * we also design our divider following the way that provides an extra
  * clk_divider_flags, however it's fixed to CLK_DIVIDER_ONE_BASED by
  * default as our HW is. Besides that it supports only CLK_DIVIDER_READ_ONLY
  * flag which can be specified by user flexibly.
  */
-struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
                                    unsigned long flags, void __iomem *reg,
                                    u8 shift, u8 width, u8 clk_divider_flags,
                                    const struct clk_div_table *table,
index fece503..101e0a3 100644 (file)
@@ -201,8 +201,9 @@ static const struct clk_ops clk_frac_pll_ops = {
        .set_rate       = clk_pll_set_rate,
 };
 
-struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
-                            void __iomem *base)
+struct clk_hw *imx_clk_hw_frac_pll(const char *name,
+                                  const char *parent_name,
+                                  void __iomem *base)
 {
        struct clk_init_data init;
        struct clk_frac_pll *pll;
@@ -230,5 +231,5 @@ struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
                return ERR_PTR(ret);
        }
 
-       return hw->clk;
+       return hw;
 }
index 60f2de8..ba33c79 100644 (file)
@@ -598,7 +598,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        }
 
        hws[IMX6QDL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
-       hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+       if (clk_on_imx6q() || clk_on_imx6qp())
+               hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = imx_clk_hw_fixed_factor("pll4_audio_div", "pll4_post_div", 1, 1);
+       else
+               hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
        hws[IMX6QDL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
        hws[IMX6QDL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
 
index 281191b..0620d6c 100644 (file)
@@ -59,7 +59,7 @@ static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata
 static void __init imx7ulp_clk_scg1_init(struct device_node *np)
 {
        struct clk_hw_onecell_data *clk_data;
-       struct clk_hw **clks;
+       struct clk_hw **hws;
        void __iomem *base;
 
        clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END),
@@ -68,76 +68,76 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
                return;
 
        clk_data->num = IMX7ULP_CLK_SCG1_END;
-       clks = clk_data->hws;
+       hws = clk_data->hws;
 
-       clks[IMX7ULP_CLK_DUMMY]         = imx_clk_hw_fixed("dummy", 0);
+       hws[IMX7ULP_CLK_DUMMY]          = imx_clk_hw_fixed("dummy", 0);
 
-       clks[IMX7ULP_CLK_ROSC]          = imx_obtain_fixed_clk_hw(np, "rosc");
-       clks[IMX7ULP_CLK_SOSC]          = imx_obtain_fixed_clk_hw(np, "sosc");
-       clks[IMX7ULP_CLK_SIRC]          = imx_obtain_fixed_clk_hw(np, "sirc");
-       clks[IMX7ULP_CLK_FIRC]          = imx_obtain_fixed_clk_hw(np, "firc");
-       clks[IMX7ULP_CLK_UPLL]          = imx_obtain_fixed_clk_hw(np, "upll");
+       hws[IMX7ULP_CLK_ROSC]           = imx_obtain_fixed_clk_hw(np, "rosc");
+       hws[IMX7ULP_CLK_SOSC]           = imx_obtain_fixed_clk_hw(np, "sosc");
+       hws[IMX7ULP_CLK_SIRC]           = imx_obtain_fixed_clk_hw(np, "sirc");
+       hws[IMX7ULP_CLK_FIRC]           = imx_obtain_fixed_clk_hw(np, "firc");
+       hws[IMX7ULP_CLK_UPLL]           = imx_obtain_fixed_clk_hw(np, "upll");
 
        /* SCG1 */
        base = of_iomap(np, 0);
        WARN_ON(!base);
 
        /* NOTE: xPLL config can't be changed when xPLL is enabled */
-       clks[IMX7ULP_CLK_APLL_PRE_SEL]  = imx_clk_hw_mux_flags("apll_pre_sel", base + 0x508, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
-       clks[IMX7ULP_CLK_SPLL_PRE_SEL]  = imx_clk_hw_mux_flags("spll_pre_sel", base + 0x608, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+       hws[IMX7ULP_CLK_APLL_PRE_SEL]   = imx_clk_hw_mux_flags("apll_pre_sel", base + 0x508, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+       hws[IMX7ULP_CLK_SPLL_PRE_SEL]   = imx_clk_hw_mux_flags("spll_pre_sel", base + 0x608, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
 
        /*                                                         name             parent_name    reg                  shift   width   flags */
-       clks[IMX7ULP_CLK_APLL_PRE_DIV]  = imx_clk_hw_divider_flags("apll_pre_div", "apll_pre_sel", base + 0x508,        8,      3,      CLK_SET_RATE_GATE);
-       clks[IMX7ULP_CLK_SPLL_PRE_DIV]  = imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608,        8,      3,      CLK_SET_RATE_GATE);
+       hws[IMX7ULP_CLK_APLL_PRE_DIV]   = imx_clk_hw_divider_flags("apll_pre_div", "apll_pre_sel", base + 0x508,        8,      3,      CLK_SET_RATE_GATE);
+       hws[IMX7ULP_CLK_SPLL_PRE_DIV]   = imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608,        8,      3,      CLK_SET_RATE_GATE);
 
        /*                                              name     parent_name     base */
-       clks[IMX7ULP_CLK_APLL]          = imx_clk_pllv4("apll",  "apll_pre_div", base + 0x500);
-       clks[IMX7ULP_CLK_SPLL]          = imx_clk_pllv4("spll",  "spll_pre_div", base + 0x600);
+       hws[IMX7ULP_CLK_APLL]           = imx_clk_hw_pllv4("apll",  "apll_pre_div", base + 0x500);
+       hws[IMX7ULP_CLK_SPLL]           = imx_clk_hw_pllv4("spll",  "spll_pre_div", base + 0x600);
 
        /* APLL PFDs */
-       clks[IMX7ULP_CLK_APLL_PFD0]     = imx_clk_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
-       clks[IMX7ULP_CLK_APLL_PFD1]     = imx_clk_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
-       clks[IMX7ULP_CLK_APLL_PFD2]     = imx_clk_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
-       clks[IMX7ULP_CLK_APLL_PFD3]     = imx_clk_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
+       hws[IMX7ULP_CLK_APLL_PFD0]      = imx_clk_hw_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
+       hws[IMX7ULP_CLK_APLL_PFD1]      = imx_clk_hw_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
+       hws[IMX7ULP_CLK_APLL_PFD2]      = imx_clk_hw_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
+       hws[IMX7ULP_CLK_APLL_PFD3]      = imx_clk_hw_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
 
        /* SPLL PFDs */
-       clks[IMX7ULP_CLK_SPLL_PFD0]     = imx_clk_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
-       clks[IMX7ULP_CLK_SPLL_PFD1]     = imx_clk_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
-       clks[IMX7ULP_CLK_SPLL_PFD2]     = imx_clk_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
-       clks[IMX7ULP_CLK_SPLL_PFD3]     = imx_clk_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
+       hws[IMX7ULP_CLK_SPLL_PFD0]      = imx_clk_hw_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
+       hws[IMX7ULP_CLK_SPLL_PFD1]      = imx_clk_hw_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
+       hws[IMX7ULP_CLK_SPLL_PFD2]      = imx_clk_hw_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
+       hws[IMX7ULP_CLK_SPLL_PFD3]      = imx_clk_hw_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
 
        /* PLL Mux */
-       clks[IMX7ULP_CLK_APLL_PFD_SEL]  = imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
-       clks[IMX7ULP_CLK_SPLL_PFD_SEL]  = imx_clk_hw_mux_flags("spll_pfd_sel", base + 0x608, 14, 2, spll_pfd_sels, ARRAY_SIZE(spll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
-       clks[IMX7ULP_CLK_APLL_SEL]      = imx_clk_hw_mux_flags("apll_sel", base + 0x508, 1, 1, apll_sels, ARRAY_SIZE(apll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
-       clks[IMX7ULP_CLK_SPLL_SEL]      = imx_clk_hw_mux_flags("spll_sel", base + 0x608, 1, 1, spll_sels, ARRAY_SIZE(spll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+       hws[IMX7ULP_CLK_APLL_PFD_SEL]   = imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+       hws[IMX7ULP_CLK_SPLL_PFD_SEL]   = imx_clk_hw_mux_flags("spll_pfd_sel", base + 0x608, 14, 2, spll_pfd_sels, ARRAY_SIZE(spll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+       hws[IMX7ULP_CLK_APLL_SEL]       = imx_clk_hw_mux_flags("apll_sel", base + 0x508, 1, 1, apll_sels, ARRAY_SIZE(apll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+       hws[IMX7ULP_CLK_SPLL_SEL]       = imx_clk_hw_mux_flags("spll_sel", base + 0x608, 1, 1, spll_sels, ARRAY_SIZE(spll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
 
-       clks[IMX7ULP_CLK_SPLL_BUS_CLK]  = imx_clk_divider_gate("spll_bus_clk", "spll_sel", CLK_SET_RATE_GATE, base + 0x604, 8, 3, 0, ulp_div_table, &imx_ccm_lock);
+       hws[IMX7ULP_CLK_SPLL_BUS_CLK]   = imx_clk_hw_divider_gate("spll_bus_clk", "spll_sel", CLK_SET_RATE_GATE, base + 0x604, 8, 3, 0, ulp_div_table, &imx_ccm_lock);
 
        /* scs/ddr/nic select different clock source requires that clock to be enabled first */
-       clks[IMX7ULP_CLK_SYS_SEL]       = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
-       clks[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
-       clks[IMX7ULP_CLK_NIC_SEL]       = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
-       clks[IMX7ULP_CLK_DDR_SEL]       = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+       hws[IMX7ULP_CLK_SYS_SEL]        = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+       hws[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+       hws[IMX7ULP_CLK_NIC_SEL]        = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
+       hws[IMX7ULP_CLK_DDR_SEL]        = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
 
-       clks[IMX7ULP_CLK_CORE_DIV]      = imx_clk_hw_divider_flags("divcore",   "scs_sel",  base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
-       clks[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
+       hws[IMX7ULP_CLK_CORE_DIV]       = imx_clk_hw_divider_flags("divcore",   "scs_sel",  base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
+       hws[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
 
-       clks[IMX7ULP_CLK_DDR_DIV]       = imx_clk_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
+       hws[IMX7ULP_CLK_DDR_DIV]        = imx_clk_hw_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
                                                               0, ulp_div_table, &imx_ccm_lock);
 
-       clks[IMX7ULP_CLK_NIC0_DIV]      = imx_clk_hw_divider_flags("nic0_clk",          "nic_sel",  base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
-       clks[IMX7ULP_CLK_NIC1_DIV]      = imx_clk_hw_divider_flags("nic1_clk",          "nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
-       clks[IMX7ULP_CLK_NIC1_BUS_DIV]  = imx_clk_hw_divider_flags("nic1_bus_clk",      "nic0_clk", base + 0x40, 4,  4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+       hws[IMX7ULP_CLK_NIC0_DIV]       = imx_clk_hw_divider_flags("nic0_clk",          "nic_sel",  base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+       hws[IMX7ULP_CLK_NIC1_DIV]       = imx_clk_hw_divider_flags("nic1_clk",          "nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+       hws[IMX7ULP_CLK_NIC1_BUS_DIV]   = imx_clk_hw_divider_flags("nic1_bus_clk",      "nic0_clk", base + 0x40, 4,  4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
 
-       clks[IMX7ULP_CLK_GPU_DIV]       = imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4);
+       hws[IMX7ULP_CLK_GPU_DIV]        = imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4);
 
-       clks[IMX7ULP_CLK_SOSC_BUS_CLK]  = imx_clk_divider_gate("sosc_bus_clk", "sosc", 0, base + 0x104, 8, 3,
+       hws[IMX7ULP_CLK_SOSC_BUS_CLK]   = imx_clk_hw_divider_gate("sosc_bus_clk", "sosc", 0, base + 0x104, 8, 3,
                                                               CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
-       clks[IMX7ULP_CLK_FIRC_BUS_CLK]  = imx_clk_divider_gate("firc_bus_clk", "firc", 0, base + 0x304, 8, 3,
+       hws[IMX7ULP_CLK_FIRC_BUS_CLK]   = imx_clk_hw_divider_gate("firc_bus_clk", "firc", 0, base + 0x304, 8, 3,
                                                               CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
 
-       imx_check_clk_hws(clks, clk_data->num);
+       imx_check_clk_hws(hws, clk_data->num);
 
        of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 }
@@ -146,7 +146,7 @@ CLK_OF_DECLARE(imx7ulp_clk_scg1, "fsl,imx7ulp-scg1", imx7ulp_clk_scg1_init);
 static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
 {
        struct clk_hw_onecell_data *clk_data;
-       struct clk_hw **clks;
+       struct clk_hw **hws;
        void __iomem *base;
        int i;
 
@@ -156,42 +156,42 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
                return;
 
        clk_data->num = IMX7ULP_CLK_PCC2_END;
-       clks = clk_data->hws;
+       hws = clk_data->hws;
 
        /* PCC2 */
        base = of_iomap(np, 0);
        WARN_ON(!base);
 
-       clks[IMX7ULP_CLK_DMA1]          = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
-       clks[IMX7ULP_CLK_RGPIO2P1]      = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
-       clks[IMX7ULP_CLK_DMA_MUX1]      = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
-       clks[IMX7ULP_CLK_CAAM]          = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
-       clks[IMX7ULP_CLK_LPTPM4]        = imx7ulp_clk_composite("lptpm4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
-       clks[IMX7ULP_CLK_LPTPM5]        = imx7ulp_clk_composite("lptpm5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
-       clks[IMX7ULP_CLK_LPIT1]         = imx7ulp_clk_composite("lpit1",   periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
-       clks[IMX7ULP_CLK_LPSPI2]        = imx7ulp_clk_composite("lpspi2",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa4);
-       clks[IMX7ULP_CLK_LPSPI3]        = imx7ulp_clk_composite("lpspi3",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa8);
-       clks[IMX7ULP_CLK_LPI2C4]        = imx7ulp_clk_composite("lpi2c4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xac);
-       clks[IMX7ULP_CLK_LPI2C5]        = imx7ulp_clk_composite("lpi2c5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb0);
-       clks[IMX7ULP_CLK_LPUART4]       = imx7ulp_clk_composite("lpuart4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb4);
-       clks[IMX7ULP_CLK_LPUART5]       = imx7ulp_clk_composite("lpuart5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb8);
-       clks[IMX7ULP_CLK_FLEXIO1]       = imx7ulp_clk_composite("flexio1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xc4);
-       clks[IMX7ULP_CLK_USB0]          = imx7ulp_clk_composite("usb0",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xcc);
-       clks[IMX7ULP_CLK_USB1]          = imx7ulp_clk_composite("usb1",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xd0);
-       clks[IMX7ULP_CLK_USB_PHY]       = imx_clk_hw_gate("usb_phy", "nic1_bus_clk", base + 0xd4, 30);
-       clks[IMX7ULP_CLK_USDHC0]        = imx7ulp_clk_composite("usdhc0",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xdc);
-       clks[IMX7ULP_CLK_USDHC1]        = imx7ulp_clk_composite("usdhc1",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xe0);
-       clks[IMX7ULP_CLK_WDG1]          = imx7ulp_clk_composite("wdg1",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xf4);
-       clks[IMX7ULP_CLK_WDG2]          = imx7ulp_clk_composite("sdg2",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0x10c);
-
-       imx_check_clk_hws(clks, clk_data->num);
+       hws[IMX7ULP_CLK_DMA1]           = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
+       hws[IMX7ULP_CLK_RGPIO2P1]       = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
+       hws[IMX7ULP_CLK_DMA_MUX1]       = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
+       hws[IMX7ULP_CLK_CAAM]           = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
+       hws[IMX7ULP_CLK_LPTPM4]         = imx7ulp_clk_hw_composite("lptpm4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+       hws[IMX7ULP_CLK_LPTPM5]         = imx7ulp_clk_hw_composite("lptpm5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+       hws[IMX7ULP_CLK_LPIT1]          = imx7ulp_clk_hw_composite("lpit1",   periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+       hws[IMX7ULP_CLK_LPSPI2]         = imx7ulp_clk_hw_composite("lpspi2",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa4);
+       hws[IMX7ULP_CLK_LPSPI3]         = imx7ulp_clk_hw_composite("lpspi3",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa8);
+       hws[IMX7ULP_CLK_LPI2C4]         = imx7ulp_clk_hw_composite("lpi2c4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xac);
+       hws[IMX7ULP_CLK_LPI2C5]         = imx7ulp_clk_hw_composite("lpi2c5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb0);
+       hws[IMX7ULP_CLK_LPUART4]        = imx7ulp_clk_hw_composite("lpuart4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb4);
+       hws[IMX7ULP_CLK_LPUART5]        = imx7ulp_clk_hw_composite("lpuart5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb8);
+       hws[IMX7ULP_CLK_FLEXIO1]        = imx7ulp_clk_hw_composite("flexio1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xc4);
+       hws[IMX7ULP_CLK_USB0]           = imx7ulp_clk_hw_composite("usb0",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xcc);
+       hws[IMX7ULP_CLK_USB1]           = imx7ulp_clk_hw_composite("usb1",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xd0);
+       hws[IMX7ULP_CLK_USB_PHY]        = imx_clk_hw_gate("usb_phy", "nic1_bus_clk", base + 0xd4, 30);
+       hws[IMX7ULP_CLK_USDHC0]         = imx7ulp_clk_hw_composite("usdhc0",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xdc);
+       hws[IMX7ULP_CLK_USDHC1]         = imx7ulp_clk_hw_composite("usdhc1",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xe0);
+       hws[IMX7ULP_CLK_WDG1]           = imx7ulp_clk_hw_composite("wdg1",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xf4);
+       hws[IMX7ULP_CLK_WDG2]           = imx7ulp_clk_hw_composite("wdg2",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0x10c);
+
+       imx_check_clk_hws(hws, clk_data->num);
 
        of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
        for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
                int index = pcc2_uart_clk_ids[i];
 
-               pcc2_uart_clks[i] = &clks[index]->clk;
+               pcc2_uart_clks[i] = &hws[index]->clk;
        }
 
        imx_register_uart_clocks(pcc2_uart_clks);
@@ -201,7 +201,7 @@ CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
 static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
 {
        struct clk_hw_onecell_data *clk_data;
-       struct clk_hw **clks;
+       struct clk_hw **hws;
        void __iomem *base;
        int i;
 
@@ -211,41 +211,41 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
                return;
 
        clk_data->num = IMX7ULP_CLK_PCC3_END;
-       clks = clk_data->hws;
+       hws = clk_data->hws;
 
        /* PCC3 */
        base = of_iomap(np, 0);
        WARN_ON(!base);
 
-       clks[IMX7ULP_CLK_LPTPM6]        = imx7ulp_clk_composite("lptpm6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x84);
-       clks[IMX7ULP_CLK_LPTPM7]        = imx7ulp_clk_composite("lptpm7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x88);
+       hws[IMX7ULP_CLK_LPTPM6] = imx7ulp_clk_hw_composite("lptpm6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x84);
+       hws[IMX7ULP_CLK_LPTPM7] = imx7ulp_clk_hw_composite("lptpm7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x88);
 
-       clks[IMX7ULP_CLK_MMDC]          = clk_hw_register_gate(NULL, "mmdc", "nic1_clk", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+       hws[IMX7ULP_CLK_MMDC]           = clk_hw_register_gate(NULL, "mmdc", "nic1_clk", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
                                                               base + 0xac, 30, 0, &imx_ccm_lock);
-       clks[IMX7ULP_CLK_LPI2C6]        = imx7ulp_clk_composite("lpi2c6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x90);
-       clks[IMX7ULP_CLK_LPI2C7]        = imx7ulp_clk_composite("lpi2c7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
-       clks[IMX7ULP_CLK_LPUART6]       = imx7ulp_clk_composite("lpuart6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
-       clks[IMX7ULP_CLK_LPUART7]       = imx7ulp_clk_composite("lpuart7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
-       clks[IMX7ULP_CLK_DSI]           = imx7ulp_clk_composite("dsi",     periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xa4);
-       clks[IMX7ULP_CLK_LCDIF]         = imx7ulp_clk_composite("lcdif",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xa8);
+       hws[IMX7ULP_CLK_LPI2C6] = imx7ulp_clk_hw_composite("lpi2c6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x90);
+       hws[IMX7ULP_CLK_LPI2C7] = imx7ulp_clk_hw_composite("lpi2c7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+       hws[IMX7ULP_CLK_LPUART6]        = imx7ulp_clk_hw_composite("lpuart6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+       hws[IMX7ULP_CLK_LPUART7]        = imx7ulp_clk_hw_composite("lpuart7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+       hws[IMX7ULP_CLK_DSI]            = imx7ulp_clk_hw_composite("dsi",     periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xa4);
+       hws[IMX7ULP_CLK_LCDIF]          = imx7ulp_clk_hw_composite("lcdif",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xa8);
 
-       clks[IMX7ULP_CLK_VIU]           = imx_clk_hw_gate("viu",   "nic1_clk",     base + 0xa0, 30);
-       clks[IMX7ULP_CLK_PCTLC]         = imx_clk_hw_gate("pctlc", "nic1_bus_clk", base + 0xb8, 30);
-       clks[IMX7ULP_CLK_PCTLD]         = imx_clk_hw_gate("pctld", "nic1_bus_clk", base + 0xbc, 30);
-       clks[IMX7ULP_CLK_PCTLE]         = imx_clk_hw_gate("pctle", "nic1_bus_clk", base + 0xc0, 30);
-       clks[IMX7ULP_CLK_PCTLF]         = imx_clk_hw_gate("pctlf", "nic1_bus_clk", base + 0xc4, 30);
+       hws[IMX7ULP_CLK_VIU]            = imx_clk_hw_gate("viu",   "nic1_clk",     base + 0xa0, 30);
+       hws[IMX7ULP_CLK_PCTLC]          = imx_clk_hw_gate("pctlc", "nic1_bus_clk", base + 0xb8, 30);
+       hws[IMX7ULP_CLK_PCTLD]          = imx_clk_hw_gate("pctld", "nic1_bus_clk", base + 0xbc, 30);
+       hws[IMX7ULP_CLK_PCTLE]          = imx_clk_hw_gate("pctle", "nic1_bus_clk", base + 0xc0, 30);
+       hws[IMX7ULP_CLK_PCTLF]          = imx_clk_hw_gate("pctlf", "nic1_bus_clk", base + 0xc4, 30);
 
-       clks[IMX7ULP_CLK_GPU3D]         = imx7ulp_clk_composite("gpu3d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x140);
-       clks[IMX7ULP_CLK_GPU2D]         = imx7ulp_clk_composite("gpu2d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x144);
+       hws[IMX7ULP_CLK_GPU3D]          = imx7ulp_clk_hw_composite("gpu3d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x140);
+       hws[IMX7ULP_CLK_GPU2D]          = imx7ulp_clk_hw_composite("gpu2d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x144);
 
-       imx_check_clk_hws(clks, clk_data->num);
+       imx_check_clk_hws(hws, clk_data->num);
 
        of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
        for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
                int index = pcc3_uart_clk_ids[i];
 
-               pcc3_uart_clks[i] = &clks[index]->clk;
+               pcc3_uart_clks[i] = &hws[index]->clk;
        }
 
        imx_register_uart_clocks(pcc3_uart_clks);
@@ -255,7 +255,7 @@ CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
 static void __init imx7ulp_clk_smc1_init(struct device_node *np)
 {
        struct clk_hw_onecell_data *clk_data;
-       struct clk_hw **clks;
+       struct clk_hw **hws;
        void __iomem *base;
 
        clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END),
@@ -264,15 +264,15 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
                return;
 
        clk_data->num = IMX7ULP_CLK_SMC1_END;
-       clks = clk_data->hws;
+       hws = clk_data->hws;
 
        /* SMC1 */
        base = of_iomap(np, 0);
        WARN_ON(!base);
 
-       clks[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
+       hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
 
-       imx_check_clk_hws(clks, clk_data->num);
+       imx_check_clk_hws(hws, clk_data->num);
 
        of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 }
index 030b15d..2ed93fc 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 
 #include "clk.h"
@@ -285,118 +286,126 @@ static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }
 static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
                                         "vpu_pll", "sys_pll1_80m", };
 
-static struct clk *clks[IMX8MM_CLK_END];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
 
-static struct clk ** const uart_clks[] = {
-       &clks[IMX8MM_CLK_UART1_ROOT],
-       &clks[IMX8MM_CLK_UART2_ROOT],
-       &clks[IMX8MM_CLK_UART3_ROOT],
-       &clks[IMX8MM_CLK_UART4_ROOT],
-       NULL
+static const int uart_clk_ids[] = {
+       IMX8MM_CLK_UART1_ROOT,
+       IMX8MM_CLK_UART2_ROOT,
+       IMX8MM_CLK_UART3_ROOT,
+       IMX8MM_CLK_UART4_ROOT,
 };
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
 
 static int imx8mm_clocks_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        void __iomem *base;
-       int ret;
+       int ret, i;
 
-       clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
-       clks[IMX8MM_CLK_24M] = of_clk_get_by_name(np, "osc_24m");
-       clks[IMX8MM_CLK_32K] = of_clk_get_by_name(np, "osc_32k");
-       clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
-       clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
-       clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
-       clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+       clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+                                         IMX8MM_CLK_END), GFP_KERNEL);
+       if (WARN_ON(!clk_hw_data))
+               return -ENOMEM;
+
+       clk_hw_data->num = IMX8MM_CLK_END;
+       hws = clk_hw_data->hws;
+
+       hws[IMX8MM_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+       hws[IMX8MM_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+       hws[IMX8MM_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+       hws[IMX8MM_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+       hws[IMX8MM_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+       hws[IMX8MM_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+       hws[IMX8MM_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
        base = of_iomap(np, 0);
        if (WARN_ON(!base))
                return -ENOMEM;
 
-       clks[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-
-       clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
-       clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
-       clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
-       clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
-       clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
-       clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
-       clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
-       clks[IMX8MM_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
-       clks[IMX8MM_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
-       clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
+       hws[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+       hws[IMX8MM_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+       hws[IMX8MM_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+       hws[IMX8MM_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+       hws[IMX8MM_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+       hws[IMX8MM_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+       hws[IMX8MM_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+       hws[IMX8MM_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+       hws[IMX8MM_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+       hws[IMX8MM_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+       hws[IMX8MM_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
 
        /* PLL bypass out */
-       clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
 
        /* PLL out gate */
-       clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
-       clks[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
-       clks[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
-       clks[IMX8MM_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
-       clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
-       clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
-       clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
-       clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+       hws[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+       hws[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+       hws[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+       hws[IMX8MM_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+       hws[IMX8MM_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+       hws[IMX8MM_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
+       hws[IMX8MM_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+       hws[IMX8MM_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
 
        /* SYS PLL1 fixed output */
-       clks[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
-       clks[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
-       clks[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
-       clks[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
-       clks[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
-       clks[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
-       clks[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
-       clks[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
-       clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
-
-       clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
-       clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
-       clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
-       clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
-       clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
-       clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
-       clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
-       clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
-       clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+       hws[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+       hws[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+       hws[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+       hws[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+       hws[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+       hws[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+       hws[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+       hws[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+       hws[IMX8MM_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+       hws[IMX8MM_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+       hws[IMX8MM_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+       hws[IMX8MM_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+       hws[IMX8MM_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+       hws[IMX8MM_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+       hws[IMX8MM_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+       hws[IMX8MM_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+       hws[IMX8MM_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+       hws[IMX8MM_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
 
        /* SYS PLL2 fixed output */
-       clks[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
-       clks[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
-       clks[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
-       clks[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
-       clks[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
-       clks[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
-       clks[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
-       clks[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
-       clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
-
-       clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
-       clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
-       clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
-       clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
-       clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
-       clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
-       clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
-       clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
-       clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+       hws[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+       hws[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+       hws[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+       hws[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+       hws[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+       hws[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+       hws[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+       hws[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+       hws[IMX8MM_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+       hws[IMX8MM_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+       hws[IMX8MM_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+       hws[IMX8MM_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+       hws[IMX8MM_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+       hws[IMX8MM_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+       hws[IMX8MM_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+       hws[IMX8MM_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+       hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+       hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
 
        np = dev->of_node;
        base = devm_platform_ioremap_resource(pdev, 0);
@@ -404,204 +413,213 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
                return PTR_ERR(base);
 
        /* Core Slice */
-       clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
-       clks[IMX8MM_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
-       clks[IMX8MM_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
-       clks[IMX8MM_CLK_GPU3D_SRC] = imx_clk_mux2("gpu3d_src", base + 0x8180, 24, 3,  imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
-       clks[IMX8MM_CLK_GPU2D_SRC] = imx_clk_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels,  ARRAY_SIZE(imx8mm_gpu2d_sels));
-       clks[IMX8MM_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
-       clks[IMX8MM_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
-       clks[IMX8MM_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
-       clks[IMX8MM_CLK_GPU3D_CG] = imx_clk_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
-       clks[IMX8MM_CLK_GPU2D_CG] = imx_clk_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
-       clks[IMX8MM_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
-       clks[IMX8MM_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
-       clks[IMX8MM_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
-       clks[IMX8MM_CLK_GPU3D_DIV] = imx_clk_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
-       clks[IMX8MM_CLK_GPU2D_DIV] = imx_clk_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
+       hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
+       hws[IMX8MM_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
+       hws[IMX8MM_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
+       hws[IMX8MM_CLK_GPU3D_SRC] = imx_clk_hw_mux2("gpu3d_src", base + 0x8180, 24, 3,  imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
+       hws[IMX8MM_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels,  ARRAY_SIZE(imx8mm_gpu2d_sels));
+       hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+       hws[IMX8MM_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+       hws[IMX8MM_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+       hws[IMX8MM_CLK_GPU3D_CG] = imx_clk_hw_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
+       hws[IMX8MM_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
+       hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+       hws[IMX8MM_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+       hws[IMX8MM_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+       hws[IMX8MM_CLK_GPU3D_DIV] = imx_clk_hw_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
+       hws[IMX8MM_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
 
        /* BUS */
-       clks[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi",  imx8mm_main_axi_sels, base + 0x8800);
-       clks[IMX8MM_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
-       clks[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
-       clks[IMX8MM_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
-       clks[IMX8MM_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
-       clks[IMX8MM_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
-       clks[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
-       clks[IMX8MM_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
-       clks[IMX8MM_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
-       clks[IMX8MM_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
-       clks[IMX8MM_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
-       clks[IMX8MM_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
+       hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi",  imx8mm_main_axi_sels, base + 0x8800);
+       hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
+       hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+       hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
+       hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
+       hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
+       hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
+       hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
+       hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
+       hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
+       hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
+       hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
 
        /* AHB */
-       clks[IMX8MM_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
-       clks[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
+       hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
+       hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
 
        /* IPG */
-       clks[IMX8MM_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
-       clks[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+       hws[IMX8MM_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+       hws[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+       /*
+        * DRAM clocks are manipulated by TF-A outside the clock framework.
+        * Mark them with CLK_GET_RATE_NOCACHE to always read the div value from hardware.
+        */
+       hws[IMX8MM_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+       hws[IMX8MM_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
 
        /* IP */
-       clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
-       clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
-       clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
-       clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
-       clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
-       clks[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
-       clks[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
-       clks[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
-       clks[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
-       clks[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
-       clks[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
-       clks[IMX8MM_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
-       clks[IMX8MM_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
-       clks[IMX8MM_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
-       clks[IMX8MM_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
-       clks[IMX8MM_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
-       clks[IMX8MM_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
-       clks[IMX8MM_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
-       clks[IMX8MM_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
-       clks[IMX8MM_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
-       clks[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
-       clks[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
-       clks[IMX8MM_CLK_NAND] = imx8m_clk_composite("nand", imx8mm_nand_sels, base + 0xab00);
-       clks[IMX8MM_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
-       clks[IMX8MM_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
-       clks[IMX8MM_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
-       clks[IMX8MM_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
-       clks[IMX8MM_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
-       clks[IMX8MM_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
-       clks[IMX8MM_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
-       clks[IMX8MM_CLK_UART1] = imx8m_clk_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
-       clks[IMX8MM_CLK_UART2] = imx8m_clk_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
-       clks[IMX8MM_CLK_UART3] = imx8m_clk_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
-       clks[IMX8MM_CLK_UART4] = imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
-       clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
-       clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
-       clks[IMX8MM_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mm_gic_sels, base + 0xb200);
-       clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
-       clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
-       clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
-       clks[IMX8MM_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
-       clks[IMX8MM_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
-       clks[IMX8MM_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
-       clks[IMX8MM_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
-       clks[IMX8MM_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
-       clks[IMX8MM_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
-       clks[IMX8MM_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
-       clks[IMX8MM_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
-       clks[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
-       clks[IMX8MM_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
-       clks[IMX8MM_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
-       clks[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
-       clks[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
-       clks[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
-       clks[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
-       clks[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
-       clks[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
-       clks[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
-       clks[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
-       clks[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
-       clks[IMX8MM_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
-       clks[IMX8MM_CLK_PDM] = imx8m_clk_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
-       clks[IMX8MM_CLK_VPU_H1] = imx8m_clk_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
+       hws[IMX8MM_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
+       hws[IMX8MM_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
+       hws[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_hw_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
+       hws[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_hw_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
+       hws[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_hw_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
+       hws[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_hw_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
+       hws[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_hw_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
+       hws[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_hw_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
+       hws[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
+       hws[IMX8MM_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
+       hws[IMX8MM_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
+       hws[IMX8MM_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
+       hws[IMX8MM_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
+       hws[IMX8MM_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
+       hws[IMX8MM_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
+       hws[IMX8MM_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
+       hws[IMX8MM_CLK_SPDIF2] = imx8m_clk_hw_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
+       hws[IMX8MM_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
+       hws[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
+       hws[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
+       hws[IMX8MM_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mm_nand_sels, base + 0xab00);
+       hws[IMX8MM_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
+       hws[IMX8MM_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
+       hws[IMX8MM_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
+       hws[IMX8MM_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
+       hws[IMX8MM_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
+       hws[IMX8MM_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
+       hws[IMX8MM_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
+       hws[IMX8MM_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
+       hws[IMX8MM_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
+       hws[IMX8MM_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
+       hws[IMX8MM_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
+       hws[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
+       hws[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
+       hws[IMX8MM_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mm_gic_sels, base + 0xb200);
+       hws[IMX8MM_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
+       hws[IMX8MM_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
+       hws[IMX8MM_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
+       hws[IMX8MM_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
+       hws[IMX8MM_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
+       hws[IMX8MM_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
+       hws[IMX8MM_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
+       hws[IMX8MM_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
+       hws[IMX8MM_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
+       hws[IMX8MM_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
+       hws[IMX8MM_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
+       hws[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
+       hws[IMX8MM_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
+       hws[IMX8MM_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
+       hws[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_hw_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
+       hws[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
+       hws[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_hw_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
+       hws[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_hw_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
+       hws[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
+       hws[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
+       hws[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
+       hws[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
+       hws[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_hw_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
+       hws[IMX8MM_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
+       hws[IMX8MM_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
+       hws[IMX8MM_CLK_VPU_H1] = imx8m_clk_hw_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
 
        /* CCGR */
-       clks[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
-       clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
-       clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
-       clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
-       clks[IMX8MM_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
-       clks[IMX8MM_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
-       clks[IMX8MM_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
-       clks[IMX8MM_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
-       clks[IMX8MM_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
-       clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
-       clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
-       clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
-       clks[IMX8MM_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
-       clks[IMX8MM_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
-       clks[IMX8MM_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
-       clks[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
-       clks[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
-       clks[IMX8MM_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
-       clks[IMX8MM_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
-       clks[IMX8MM_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
-       clks[IMX8MM_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
-       clks[IMX8MM_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
-       clks[IMX8MM_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
-       clks[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
-       clks[IMX8MM_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
-       clks[IMX8MM_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
-       clks[IMX8MM_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
-       clks[IMX8MM_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
-       clks[IMX8MM_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
-       clks[IMX8MM_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
-       clks[IMX8MM_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
-       clks[IMX8MM_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
-       clks[IMX8MM_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
-       clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
-       clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
-       clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
-       clks[IMX8MM_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
-       clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
-       clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
-       clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
-       clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
-       clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
-       clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
-       clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
-       clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
-       clks[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
-       clks[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
-       clks[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
-       clks[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
-       clks[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
-       clks[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
-       clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
-       clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
-       clks[IMX8MM_CLK_PDM_IPG]  = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
-       clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MM_CLK_DISP_AXI_ROOT]  = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MM_CLK_DISP_APB_ROOT]  = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
-       clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
-       clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
-       clks[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
-       clks[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
-       clks[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
-       clks[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
-       clks[IMX8MM_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
-
-       clks[IMX8MM_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc_24m", 1, 8);
-
-       clks[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
-       clks[IMX8MM_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
-
-       clks[IMX8MM_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
-                                          clks[IMX8MM_CLK_A53_DIV],
-                                          clks[IMX8MM_CLK_A53_SRC],
-                                          clks[IMX8MM_ARM_PLL_OUT],
-                                          clks[IMX8MM_SYS_PLL1_800M]);
-
-       imx_check_clocks(clks, ARRAY_SIZE(clks));
-
-       clk_data.clks = clks;
-       clk_data.clk_num = ARRAY_SIZE(clks);
-       ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       hws[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+       hws[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+       hws[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+       hws[IMX8MM_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+       hws[IMX8MM_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+       hws[IMX8MM_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+       hws[IMX8MM_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+       hws[IMX8MM_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+       hws[IMX8MM_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+       hws[IMX8MM_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+       hws[IMX8MM_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+       hws[IMX8MM_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+       hws[IMX8MM_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+       hws[IMX8MM_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+       hws[IMX8MM_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+       hws[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+       hws[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_hw_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+       hws[IMX8MM_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+       hws[IMX8MM_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+       hws[IMX8MM_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+       hws[IMX8MM_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+       hws[IMX8MM_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+       hws[IMX8MM_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MM_CLK_SAI1_ROOT] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+       hws[IMX8MM_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+       hws[IMX8MM_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+       hws[IMX8MM_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+       hws[IMX8MM_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+       hws[IMX8MM_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+       hws[IMX8MM_CLK_SAI4_ROOT] = imx_clk_hw_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+       hws[IMX8MM_CLK_SAI4_IPG] = imx_clk_hw_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+       hws[IMX8MM_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+       hws[IMX8MM_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+       hws[IMX8MM_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+       hws[IMX8MM_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+       hws[IMX8MM_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
+       hws[IMX8MM_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+       hws[IMX8MM_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+       hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+       hws[IMX8MM_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+       hws[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+       hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
+       hws[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+       hws[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+       hws[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+       hws[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+       hws[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+       hws[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_hw_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
+       hws[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+       hws[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_hw_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
+       hws[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
+       hws[IMX8MM_CLK_PDM_ROOT] = imx_clk_hw_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+       hws[IMX8MM_CLK_PDM_IPG]  = imx_clk_hw_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+       hws[IMX8MM_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MM_CLK_DISP_AXI_ROOT]  = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MM_CLK_DISP_APB_ROOT]  = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_hw_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+       hws[IMX8MM_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+       hws[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_hw_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
+       hws[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+       hws[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+       hws[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+       hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
+       hws[IMX8MM_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+
+       hws[IMX8MM_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
+
+       hws[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+       hws[IMX8MM_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
+
+       hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+                                          hws[IMX8MM_CLK_A53_DIV]->clk,
+                                          hws[IMX8MM_CLK_A53_SRC]->clk,
+                                          hws[IMX8MM_ARM_PLL_OUT]->clk,
+                                          hws[IMX8MM_SYS_PLL1_800M]->clk);
+
+       imx_check_clk_hws(hws, IMX8MM_CLK_END);
+
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
        if (ret < 0) {
-               pr_err("failed to register clks for i.MX8MM\n");
-               goto unregister_clks;
+               dev_err(dev, "failed to register clks for i.MX8MM\n");
+               goto unregister_hws;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+               int index = uart_clk_ids[i];
+
+               uart_hws[i] = &hws[index]->clk;
        }
 
-       imx_register_uart_clocks(uart_clks);
+       imx_register_uart_clocks(uart_hws);
 
        return 0;
 
-unregister_clks:
-       imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+       imx_unregister_hw_clocks(hws, IMX8MM_CLK_END);
 
        return ret;
 }
@@ -616,6 +634,11 @@ static struct platform_driver imx8mm_clk_driver = {
        .probe = imx8mm_clocks_probe,
        .driver = {
                .name = "imx8mm-ccm",
+               /*
+                * Disable bind attributes: clocks are not removed and
+                * reloading the driver will crash or break devices.
+                */
+               .suppress_bind_attrs = true,
                .of_match_table = of_match_ptr(imx8mm_clk_of_match),
        },
 };
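
Editor's note: the hunks above convert the i.MX8MM driver from registering struct clk pointers with a clk_onecell_data table to registering struct clk_hw objects exposed through a clk_hw_onecell_data table. The following is a minimal, self-contained sketch of that provider pattern, not taken from the patch; the names my_clk_probe, my_hw_data, MY_CLK_* and the "my_fixed" clock are illustrative assumptions, while the APIs (struct clk_hw_onecell_data, of_clk_add_hw_provider(), of_clk_hw_onecell_get(), struct_size()) are the generic kernel interfaces the diff itself uses.

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Illustrative clock IDs; a real driver takes these from its DT binding header. */
#define MY_CLK_FIXED	0
#define MY_CLK_END	1

static struct clk_hw_onecell_data *my_hw_data;

static int my_clk_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct clk_hw **hws;
	int ret;

	/* Allocate the onecell table sized for all clock IDs at once. */
	my_hw_data = kzalloc(struct_size(my_hw_data, hws, MY_CLK_END),
			     GFP_KERNEL);
	if (!my_hw_data)
		return -ENOMEM;

	my_hw_data->num = MY_CLK_END;
	hws = my_hw_data->hws;

	/* Register clocks as clk_hw objects instead of struct clk pointers. */
	hws[MY_CLK_FIXED] = clk_hw_register_fixed_rate(NULL, "my_fixed",
						       NULL, 0, 24000000);

	/* Expose the table through the generic onecell clk_hw getter. */
	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, my_hw_data);
	if (ret)
		kfree(my_hw_data);

	return ret;
}

The design point is that consumers still call clk_get()/devm_clk_get() as before; only the provider side stops holding struct clk, which is a per-consumer handle, and instead publishes the underlying clk_hw objects.
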
index 9f5a5a5..c5e7316 100644
@@ -12,6 +12,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 
 #include "clk.h"
@@ -280,284 +281,302 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy
                                                 "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
                                                 "video_pll1_out", "osc_32k", };
 
-static struct clk *clks[IMX8MN_CLK_END];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
 
-static struct clk ** const uart_clks[] = {
-       &clks[IMX8MN_CLK_UART1_ROOT],
-       &clks[IMX8MN_CLK_UART2_ROOT],
-       &clks[IMX8MN_CLK_UART3_ROOT],
-       &clks[IMX8MN_CLK_UART4_ROOT],
-       NULL
+static const int uart_clk_ids[] = {
+       IMX8MN_CLK_UART1_ROOT,
+       IMX8MN_CLK_UART2_ROOT,
+       IMX8MN_CLK_UART3_ROOT,
+       IMX8MN_CLK_UART4_ROOT,
 };
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
 
 static int imx8mn_clocks_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        void __iomem *base;
-       int ret;
+       int ret, i;
 
-       clks[IMX8MN_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
-       clks[IMX8MN_CLK_24M] = of_clk_get_by_name(np, "osc_24m");
-       clks[IMX8MN_CLK_32K] = of_clk_get_by_name(np, "osc_32k");
-       clks[IMX8MN_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
-       clks[IMX8MN_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
-       clks[IMX8MN_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
-       clks[IMX8MN_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+       clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+                                         IMX8MN_CLK_END), GFP_KERNEL);
+       if (WARN_ON(!clk_hw_data))
+               return -ENOMEM;
+
+       clk_hw_data->num = IMX8MN_CLK_END;
+       hws = clk_hw_data->hws;
+
+       hws[IMX8MN_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+       hws[IMX8MN_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+       hws[IMX8MN_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+       hws[IMX8MN_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+       hws[IMX8MN_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+       hws[IMX8MN_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+       hws[IMX8MN_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
        base = of_iomap(np, 0);
        if (WARN_ON(!base)) {
                ret = -ENOMEM;
-               goto unregister_clks;
+               goto unregister_hws;
        }
 
-       clks[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-
-       clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
-       clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
-       clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
-       clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
-       clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
-       clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
-       clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
-       clks[IMX8MN_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
-       clks[IMX8MN_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
-       clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
+       hws[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+       hws[IMX8MN_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+       hws[IMX8MN_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+       hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+       hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+       hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+       hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+       hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+       hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+       hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+       hws[IMX8MN_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
 
        /* PLL bypass out */
-       clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
 
        /* PLL out gate */
-       clks[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
-       clks[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
-       clks[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
-       clks[IMX8MN_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
-       clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
-       clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
-       clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
-       clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+       hws[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+       hws[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+       hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+       hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+       hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+       hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
+       hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+       hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
 
        /* SYS PLL1 fixed output */
-       clks[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
-       clks[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
-       clks[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
-       clks[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
-       clks[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
-       clks[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
-       clks[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
-       clks[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
-       clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
-
-       clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
-       clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
-       clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
-       clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
-       clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
-       clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
-       clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
-       clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
-       clks[IMX8MN_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+       hws[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+       hws[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+       hws[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+       hws[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+       hws[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+       hws[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+       hws[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+       hws[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+       hws[IMX8MN_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+       hws[IMX8MN_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+       hws[IMX8MN_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+       hws[IMX8MN_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+       hws[IMX8MN_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+       hws[IMX8MN_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+       hws[IMX8MN_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+       hws[IMX8MN_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+       hws[IMX8MN_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+       hws[IMX8MN_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
 
        /* SYS PLL2 fixed output */
-       clks[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
-       clks[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
-       clks[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
-       clks[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
-       clks[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
-       clks[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
-       clks[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
-       clks[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
-       clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
-
-       clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
-       clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
-       clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
-       clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
-       clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
-       clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
-       clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
-       clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
-       clks[IMX8MN_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+       hws[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+       hws[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+       hws[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+       hws[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+       hws[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+       hws[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+       hws[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+       hws[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+       hws[IMX8MN_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+       hws[IMX8MN_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+       hws[IMX8MN_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+       hws[IMX8MN_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+       hws[IMX8MN_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+       hws[IMX8MN_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+       hws[IMX8MN_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+       hws[IMX8MN_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+       hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+       hws[IMX8MN_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
 
        np = dev->of_node;
        base = devm_platform_ioremap_resource(pdev, 0);
        if (WARN_ON(IS_ERR(base))) {
                ret = PTR_ERR(base);
-               goto unregister_clks;
+               goto unregister_hws;
        }
 
        /* CORE */
-       clks[IMX8MN_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
-       clks[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
-       clks[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels,  ARRAY_SIZE(imx8mn_gpu_shader_sels));
-       clks[IMX8MN_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
-       clks[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
-       clks[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+       hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
+       hws[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
+       hws[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels,  ARRAY_SIZE(imx8mn_gpu_shader_sels));
+       hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+       hws[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+       hws[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
 
-       clks[IMX8MN_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
-       clks[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
-       clks[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+       hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+       hws[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+       hws[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
 
        /* BUS */
-       clks[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
-       clks[IMX8MN_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
-       clks[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
-       clks[IMX8MN_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
-       clks[IMX8MN_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
-       clks[IMX8MN_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
-       clks[IMX8MN_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
-       clks[IMX8MN_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
-       clks[IMX8MN_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
-
-       clks[IMX8MN_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
-       clks[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
-       clks[IMX8MN_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
-       clks[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
-       clks[IMX8MN_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
-       clks[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
-       clks[IMX8MN_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
-       clks[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
-       clks[IMX8MN_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
-       clks[IMX8MN_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
-       clks[IMX8MN_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
-       clks[IMX8MN_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mn_sai6_sels, base + 0xa800);
-       clks[IMX8MN_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mn_spdif1_sels, base + 0xa880);
-       clks[IMX8MN_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mn_enet_ref_sels, base + 0xa980);
-       clks[IMX8MN_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mn_enet_timer_sels, base + 0xaa00);
-       clks[IMX8MN_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mn_enet_phy_sels, base + 0xaa80);
-       clks[IMX8MN_CLK_NAND] = imx8m_clk_composite("nand", imx8mn_nand_sels, base + 0xab00);
-       clks[IMX8MN_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mn_qspi_sels, base + 0xab80);
-       clks[IMX8MN_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mn_usdhc1_sels, base + 0xac00);
-       clks[IMX8MN_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mn_usdhc2_sels, base + 0xac80);
-       clks[IMX8MN_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mn_i2c1_sels, base + 0xad00);
-       clks[IMX8MN_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mn_i2c2_sels, base + 0xad80);
-       clks[IMX8MN_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mn_i2c3_sels, base + 0xae00);
-       clks[IMX8MN_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mn_i2c4_sels, base + 0xae80);
-       clks[IMX8MN_CLK_UART1] = imx8m_clk_composite("uart1", imx8mn_uart1_sels, base + 0xaf00);
-       clks[IMX8MN_CLK_UART2] = imx8m_clk_composite("uart2", imx8mn_uart2_sels, base + 0xaf80);
-       clks[IMX8MN_CLK_UART3] = imx8m_clk_composite("uart3", imx8mn_uart3_sels, base + 0xb000);
-       clks[IMX8MN_CLK_UART4] = imx8m_clk_composite("uart4", imx8mn_uart4_sels, base + 0xb080);
-       clks[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100);
-       clks[IMX8MN_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180);
-       clks[IMX8MN_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mn_gic_sels, base + 0xb200);
-       clks[IMX8MN_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280);
-       clks[IMX8MN_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300);
-       clks[IMX8MN_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380);
-       clks[IMX8MN_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
-       clks[IMX8MN_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
-       clks[IMX8MN_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
-       clks[IMX8MN_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
-       clks[IMX8MN_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
-       clks[IMX8MN_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
-       clks[IMX8MN_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mn_clko2_sels, base + 0xba80);
-       clks[IMX8MN_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mn_dsi_core_sels, base + 0xbb00);
-       clks[IMX8MN_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mn_dsi_phy_sels, base + 0xbb80);
-       clks[IMX8MN_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mn_dsi_dbi_sels, base + 0xbc00);
-       clks[IMX8MN_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mn_usdhc3_sels, base + 0xbc80);
-       clks[IMX8MN_CLK_CAMERA_PIXEL] = imx8m_clk_composite("camera_pixel", imx8mn_camera_pixel_sels, base + 0xbd00);
-       clks[IMX8MN_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mn_csi1_phy_sels, base + 0xbd80);
-       clks[IMX8MN_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mn_csi2_phy_sels, base + 0xbf00);
-       clks[IMX8MN_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mn_csi2_esc_sels, base + 0xbf80);
-       clks[IMX8MN_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mn_ecspi3_sels, base + 0xc180);
-       clks[IMX8MN_CLK_PDM] = imx8m_clk_composite("pdm", imx8mn_pdm_sels, base + 0xc200);
-       clks[IMX8MN_CLK_SAI7] = imx8m_clk_composite("sai7", imx8mn_sai7_sels, base + 0xc300);
-
-       clks[IMX8MN_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
-       clks[IMX8MN_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
-       clks[IMX8MN_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
-       clks[IMX8MN_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
-       clks[IMX8MN_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
-       clks[IMX8MN_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
-       clks[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
-       clks[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
-       clks[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
-       clks[IMX8MN_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
-       clks[IMX8MN_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
-       clks[IMX8MN_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
-       clks[IMX8MN_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
-       clks[IMX8MN_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
-       clks[IMX8MN_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
-       clks[IMX8MN_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
-       clks[IMX8MN_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
-       clks[IMX8MN_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
-       clks[IMX8MN_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
-       clks[IMX8MN_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
-       clks[IMX8MN_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
-       clks[IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
-       clks[IMX8MN_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
-       clks[IMX8MN_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
-       clks[IMX8MN_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
-       clks[IMX8MN_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
-       clks[IMX8MN_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
-       clks[IMX8MN_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
-       clks[IMX8MN_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
-       clks[IMX8MN_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
-       clks[IMX8MN_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
-       clks[IMX8MN_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
-       clks[IMX8MN_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
-       clks[IMX8MN_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
-       clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
-       clks[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
-       clks[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
-       clks[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
-       clks[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
-       clks[IMX8MN_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
-       clks[IMX8MN_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
-       clks[IMX8MN_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
-       clks[IMX8MN_CLK_ASRC_ROOT] = imx_clk_gate4("asrc_root_clk", "audio_ahb", base + 0x4580, 0);
-       clks[IMX8MN_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
-       clks[IMX8MN_CLK_PDM_IPG]  = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
-       clks[IMX8MN_CLK_DISP_AXI_ROOT]  = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MN_CLK_DISP_APB_ROOT]  = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MN_CLK_CAMERA_PIXEL_ROOT] = imx_clk_gate2_shared2("camera_pixel_clk", "camera_pixel", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MN_CLK_DISP_PIXEL_ROOT] = imx_clk_gate2_shared2("disp_pixel_clk", "disp_pixel", base + 0x45d0, 0, &share_count_disp);
-       clks[IMX8MN_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
-       clks[IMX8MN_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
-       clks[IMX8MN_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
-       clks[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
-       clks[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
-       clks[IMX8MN_CLK_SAI7_ROOT] = imx_clk_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
-
-       clks[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
-
-       clks[IMX8MN_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
-                                          clks[IMX8MN_CLK_A53_DIV],
-                                          clks[IMX8MN_CLK_A53_SRC],
-                                          clks[IMX8MN_ARM_PLL_OUT],
-                                          clks[IMX8MN_SYS_PLL1_800M]);
-
-       imx_check_clocks(clks, ARRAY_SIZE(clks));
-
-       clk_data.clks = clks;
-       clk_data.clk_num = ARRAY_SIZE(clks);
-       ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
+       hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
+       hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
+       hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
+       hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
+       hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
+       hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
+       hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
+       hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
+
+       hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
+       hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
+       hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+       hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+       hws[IMX8MN_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
+
+       /*
+        * DRAM clocks are manipulated from TF-A outside the clock framework.
+        * Mark with GET_RATE_NOCACHE to always read the divider value from hardware.
+        */
+       hws[IMX8MN_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+       hws[IMX8MN_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mn_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+
+       hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
+       hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
+       hws[IMX8MN_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
+       hws[IMX8MN_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
+       hws[IMX8MN_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mn_sai6_sels, base + 0xa800);
+       hws[IMX8MN_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mn_spdif1_sels, base + 0xa880);
+       hws[IMX8MN_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mn_enet_ref_sels, base + 0xa980);
+       hws[IMX8MN_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mn_enet_timer_sels, base + 0xaa00);
+       hws[IMX8MN_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mn_enet_phy_sels, base + 0xaa80);
+       hws[IMX8MN_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mn_nand_sels, base + 0xab00);
+       hws[IMX8MN_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mn_qspi_sels, base + 0xab80);
+       hws[IMX8MN_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mn_usdhc1_sels, base + 0xac00);
+       hws[IMX8MN_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mn_usdhc2_sels, base + 0xac80);
+       hws[IMX8MN_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mn_i2c1_sels, base + 0xad00);
+       hws[IMX8MN_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mn_i2c2_sels, base + 0xad80);
+       hws[IMX8MN_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mn_i2c3_sels, base + 0xae00);
+       hws[IMX8MN_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mn_i2c4_sels, base + 0xae80);
+       hws[IMX8MN_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mn_uart1_sels, base + 0xaf00);
+       hws[IMX8MN_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mn_uart2_sels, base + 0xaf80);
+       hws[IMX8MN_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mn_uart3_sels, base + 0xb000);
+       hws[IMX8MN_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mn_uart4_sels, base + 0xb080);
+       hws[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100);
+       hws[IMX8MN_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180);
+       hws[IMX8MN_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mn_gic_sels, base + 0xb200);
+       hws[IMX8MN_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280);
+       hws[IMX8MN_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300);
+       hws[IMX8MN_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380);
+       hws[IMX8MN_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
+       hws[IMX8MN_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
+       hws[IMX8MN_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
+       hws[IMX8MN_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
+       hws[IMX8MN_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
+       hws[IMX8MN_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
+       hws[IMX8MN_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mn_clko2_sels, base + 0xba80);
+       hws[IMX8MN_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mn_dsi_core_sels, base + 0xbb00);
+       hws[IMX8MN_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mn_dsi_phy_sels, base + 0xbb80);
+       hws[IMX8MN_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mn_dsi_dbi_sels, base + 0xbc00);
+       hws[IMX8MN_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mn_usdhc3_sels, base + 0xbc80);
+       hws[IMX8MN_CLK_CAMERA_PIXEL] = imx8m_clk_hw_composite("camera_pixel", imx8mn_camera_pixel_sels, base + 0xbd00);
+       hws[IMX8MN_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mn_csi1_phy_sels, base + 0xbd80);
+       hws[IMX8MN_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mn_csi2_phy_sels, base + 0xbf00);
+       hws[IMX8MN_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mn_csi2_esc_sels, base + 0xbf80);
+       hws[IMX8MN_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mn_ecspi3_sels, base + 0xc180);
+       hws[IMX8MN_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mn_pdm_sels, base + 0xc200);
+       hws[IMX8MN_CLK_SAI7] = imx8m_clk_hw_composite("sai7", imx8mn_sai7_sels, base + 0xc300);
+
+       hws[IMX8MN_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+       hws[IMX8MN_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+       hws[IMX8MN_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+       hws[IMX8MN_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+       hws[IMX8MN_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+       hws[IMX8MN_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+       hws[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+       hws[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+       hws[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+       hws[IMX8MN_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+       hws[IMX8MN_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+       hws[IMX8MN_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+       hws[IMX8MN_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+       hws[IMX8MN_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+       hws[IMX8MN_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+       hws[IMX8MN_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+       hws[IMX8MN_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+       hws[IMX8MN_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+       hws[IMX8MN_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+       hws[IMX8MN_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+       hws[IMX8MN_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MN_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+       hws[IMX8MN_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+       hws[IMX8MN_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+       hws[IMX8MN_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+       hws[IMX8MN_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+       hws[IMX8MN_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+       hws[IMX8MN_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+       hws[IMX8MN_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+       hws[IMX8MN_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+       hws[IMX8MN_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+       hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+       hws[IMX8MN_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+       hws[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+       hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
+       hws[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+       hws[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+       hws[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+       hws[IMX8MN_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+       hws[IMX8MN_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+       hws[IMX8MN_CLK_GPU_BUS_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+       hws[IMX8MN_CLK_ASRC_ROOT] = imx_clk_hw_gate4("asrc_root_clk", "audio_ahb", base + 0x4580, 0);
+       hws[IMX8MN_CLK_PDM_ROOT] = imx_clk_hw_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+       hws[IMX8MN_CLK_PDM_IPG]  = imx_clk_hw_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+       hws[IMX8MN_CLK_DISP_AXI_ROOT]  = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MN_CLK_DISP_APB_ROOT]  = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MN_CLK_CAMERA_PIXEL_ROOT] = imx_clk_hw_gate2_shared2("camera_pixel_clk", "camera_pixel", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MN_CLK_DISP_PIXEL_ROOT] = imx_clk_hw_gate2_shared2("disp_pixel_clk", "disp_pixel", base + 0x45d0, 0, &share_count_disp);
+       hws[IMX8MN_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+       hws[IMX8MN_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+       hws[IMX8MN_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+       hws[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+       hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+       hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+
+       hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+
+       hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+                                          hws[IMX8MN_CLK_A53_DIV]->clk,
+                                          hws[IMX8MN_CLK_A53_SRC]->clk,
+                                          hws[IMX8MN_ARM_PLL_OUT]->clk,
+                                          hws[IMX8MN_SYS_PLL1_800M]->clk);
+
+       imx_check_clk_hws(hws, IMX8MN_CLK_END);
+
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
        if (ret < 0) {
-               dev_err(dev, "failed to register clks for i.MX8MN\n");
-               goto unregister_clks;
+               dev_err(dev, "failed to register hws for i.MX8MN\n");
+               goto unregister_hws;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+               int index = uart_clk_ids[i];
+
+               uart_hws[i] = &hws[index]->clk;
        }
 
-       imx_register_uart_clocks(uart_clks);
+       imx_register_uart_clocks(uart_hws);
 
        return 0;
 
-unregister_clks:
-       imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+       imx_unregister_hw_clocks(hws, IMX8MN_CLK_END);
 
        return ret;
 }
@@ -572,6 +591,11 @@ static struct platform_driver imx8mn_clk_driver = {
        .probe = imx8mn_clocks_probe,
        .driver = {
                .name = "imx8mn-ccm",
+               /*
+                * Disable bind attributes: clocks are not removed and
+                * reloading the driver will crash or break devices.
+                */
+               .suppress_bind_attrs = true,
                .of_match_table = of_match_ptr(imx8mn_clk_of_match),
        },
 };
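
The hunk above converts the i.MX8MN CCM probe from struct clk registration (of_clk_add_provider() with of_clk_src_onecell_get) to the clk_hw-based API. The sketch below illustrates that registration pattern only in outline, assuming a clk_hw_onecell_data table sized by the binding's last clock ID; my_clk_probe and MY_CLK_END are illustrative names, not part of the patch.

// Minimal sketch of a clk_hw-based onecell provider, under the
// assumptions stated above (MY_CLK_END and my_clk_probe are hypothetical).
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define MY_CLK_END 4 /* hypothetical last-ID + 1 from a dt-bindings header */

static int my_clk_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct clk_hw_onecell_data *hw_data;
	struct clk_hw **hws;
	int ret;

	/* One hws[] slot per clock ID exported to device tree consumers. */
	hw_data = devm_kzalloc(&pdev->dev,
			       struct_size(hw_data, hws, MY_CLK_END),
			       GFP_KERNEL);
	if (!hw_data)
		return -ENOMEM;

	hw_data->num = MY_CLK_END;
	hws = hw_data->hws;

	/* ... fill hws[...] with clk_hw registrations here ... */

	/* Publish the table; consumers look clocks up by index. */
	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
	if (ret < 0)
		return ret;

	return 0;
}

Compared with the old of_clk_src_onecell_get path, the provider hands out struct clk_hw pointers and the core creates per-consumer struct clk instances on demand; where a struct clk is still required, as for imx_register_uart_clocks() and imx_clk_hw_cpu() above, the patch dereferences hws[index]->clk.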
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
new file mode 100644 (file)
index 0000000..f6c120c
--- /dev/null
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <dt-bindings/clock/imx8mp-clock.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+static u32 share_count_nand;
+static u32 share_count_media;
+
+static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char * const sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
+static const char * const sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
+static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+
+static const char * const imx8mp_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m",
+                                              "sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
+                                              "audio_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m",
+                                             "vpu_pll_out", "sys_pll1_800m", "audio_pll1_out",
+                                             "video_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_ml_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+                                             "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                             "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu3d_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+                                                     "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                     "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu3d_shader_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+                                                       "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                       "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu2d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+                                                "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_audio_axi_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+                                                    "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                    "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_hsio_axi_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+                                                   "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+                                                   "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_media_isp_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m",
+                                                    "sys_pll3_out", "sys_pll1_400m", "audio_pll2_out",
+                                                    "clk_ext1", "sys_pll2_500m", };
+
+static const char * const imx8mp_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m",
+                                                   "sys_pll2_250m", "sys_pll2_1000m", "audio_pll1_out",
+                                                   "video_pll1_out", "sys_pll1_100m",};
+
+static const char * const imx8mp_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+                                                   "sys_pll2_250m", "sys_pll2_200m", "audio_pll1_out",
+                                                   "video_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+                                                     "sys_pll2_200m", "sys_pll1_133m", "sys_pll3_out",
+                                                     "sys_pll2_250m", "audio_pll1_out", };
+
+static const char * const imx8mp_vpu_bus_sels[] = {"osc_24m", "sys_pll1_800m", "vpu_pll_out",
+                                                  "audio_pll2_out", "sys_pll3_out", "sys_pll2_1000m",
+                                                  "sys_pll2_200m", "sys_pll1_100m", };
+
+static const char * const imx8mp_media_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m",
+                                                    "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+                                                    "clk_ext1", "sys_pll2_500m", };
+
+static const char * const imx8mp_media_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m",
+                                                    "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+                                                    "clk_ext1", "sys_pll1_133m", };
+
+static const char * const imx8mp_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+                                                  "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                  "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+                                                  "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                  "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+                                              "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+                                              "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_noc_io_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+                                                 "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+                                                 "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ml_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+                                                 "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                 "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ml_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+                                                 "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+                                                 "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m",
+                                              "sys_pll1_400m", "sys_pll2_125m", "sys_pll3_out",
+                                              "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+                                                    "sys_pll2_1000m", "sys_pll2_166m", "sys_pll3_out",
+                                                    "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_mipi_dsi_esc_rx_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+                                                          "sys_pll1_800m", "sys_pll2_1000m",
+                                                          "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m",
+                                                   "sys_pll2_500m", "sys_pll2_1000m", "sys_pll3_out",
+                                                   "audio_pll1_out", "sys_pll1_266m", };
+
+static const char * const imx8mp_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+                                                   "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+                                                   "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+                                                 "sys_pll2_1000m", "sys_pll1_100m", "sys_pll2_125m",
+                                                 "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+                                                 "sys_pll2_1000m", "sys_pll1_100m", "sys_pll2_125m",
+                                                 "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_can1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+                                               "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+                                               "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_can2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+                                               "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+                                               "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_memrepair_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                                    "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                                    "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_pcie_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m",
+                                                   "clk_ext1", "clk_ext2", "clk_ext3",
+                                                   "clk_ext4", "sys_pll1_400m", };
+
+static const char * const imx8mp_pcie_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m",
+                                                   "sys_pll3_out", "sys_pll2_100m", "sys_pll1_80m",
+                                                   "sys_pll1_160m", "sys_pll1_200m", };
+
+static const char * const imx8mp_i2c5_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                               "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                               "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c6_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                               "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                               "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_sai1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+                                               "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+                                               "clk_ext1", "clk_ext2", };
+
+static const char * const imx8mp_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+                                               "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+                                               "clk_ext2", "clk_ext3", };
+
+static const char * const imx8mp_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+                                               "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+                                               "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+                                               "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+                                               "clk_ext1", "clk_ext2", };
+
+static const char * const imx8mp_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+                                               "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+                                               "clk_ext2", "clk_ext3", };
+
+static const char * const imx8mp_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+                                               "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+                                               "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_enet_qos_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+                                                   "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+                                                   "video_pll1_out", "clk_ext4", };
+
+static const char * const imx8mp_enet_qos_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+                                                         "clk_ext1", "clk_ext2", "clk_ext3",
+                                                         "clk_ext4", "video_pll1_out", };
+
+static const char * const imx8mp_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+                                                   "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+                                                   "video_pll1_out", "clk_ext4", };
+
+static const char * const imx8mp_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+                                                     "clk_ext1", "clk_ext2", "clk_ext3",
+                                                     "clk_ext4", "video_pll1_out", };
+
+static const char * const imx8mp_enet_phy_ref_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m",
+                                                       "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out",
+                                                       "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out",
+                                               "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out",
+                                               "sys_pll2_250m", "video_pll1_out", };
+
+static const char * const imx8mp_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m",
+                                               "sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m",
+                                               "sys_pll3_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+                                                 "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+                                                 "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+                                                 "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+                                                 "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                               "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                               "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                               "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                               "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                               "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                               "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                               "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                               "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+                                                "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+                                                "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+                                                "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+                                                "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+                                                "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+                                                "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+                                                "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+                                                "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_usb_core_ref_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+                                                       "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+                                                       "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_usb_phy_ref_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+                                                      "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+                                                      "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_gic_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+                                              "sys_pll2_100m", "sys_pll1_800m",
+                                              "sys_pll2_500m", "clk_ext4", "audio_pll2_out" };
+
+static const char * const imx8mp_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+                                                 "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+                                                 "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+                                                 "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+                                                 "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+                                               "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+                                               "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+                                               "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+                                               "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+                                               "sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+                                               "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+                                               "sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+                                               "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+                                               "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+                                               "audio_pll1_out", "clk_ext1" };
+
+static const char * const imx8mp_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+                                               "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+                                               "audio_pll1_out", "clk_ext2" };
+
+static const char * const imx8mp_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+                                               "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+                                               "audio_pll1_out", "clk_ext3" };
+
+static const char * const imx8mp_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+                                               "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+                                               "audio_pll1_out", "clk_ext1" };
+
+static const char * const imx8mp_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+                                               "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+                                               "audio_pll1_out", "clk_ext2" };
+
+static const char * const imx8mp_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+                                               "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+                                               "audio_pll1_out", "clk_ext3" };
+
+static const char * const imx8mp_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m",
+                                               "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out",
+                                               "sys_pll1_80m", "sys_pll2_166m" };
+
+static const char * const imx8mp_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out",
+                                                "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m",
+                                                "sys_pll2_500m", "sys_pll1_100m" };
+
+static const char * const imx8mp_ipp_do_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_133m",
+                                                       "sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m",
+                                                       "vpu_pll_out", "sys_pll1_80m" };
+
+static const char * const imx8mp_ipp_do_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
+                                                       "sys_pll1_166m", "sys_pll3_out", "audio_pll1_out",
+                                                       "video_pll1_out", "osc_32k" };
+
+static const char * const imx8mp_hdmi_fdcc_tst_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+                                                        "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+                                                        "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mp_hdmi_27m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+                                                   "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+                                                   "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_hdmi_ref_266m_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll3_out",
+                                                        "sys_pll2_333m", "sys_pll1_266m", "sys_pll2_200m",
+                                                        "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+                                                 "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+                                                 "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_media_cam1_pix_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+                                                         "sys_pll1_800m", "sys_pll2_1000m",
+                                                         "sys_pll3_out", "audio_pll2_out",
+                                                         "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_phy1_ref_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+                                                              "sys_pll1_800m", "sys_pll2_1000m",
+                                                              "clk_ext2", "audio_pll2_out",
+                                                              "video_pll1_out", };
+
+static const char * const imx8mp_media_disp1_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
+                                                          "audio_pll1_out", "sys_pll1_800m",
+                                                          "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
+
+static const char * const imx8mp_media_cam2_pix_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+                                                         "sys_pll1_800m", "sys_pll2_1000m",
+                                                         "sys_pll3_out", "audio_pll2_out",
+                                                         "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_phy2_ref_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+                                                              "sys_pll1_800m", "sys_pll2_1000m",
+                                                              "clk_ext2", "audio_pll2_out",
+                                                              "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+                                                              "sys_pll1_800m", "sys_pll2_1000m",
+                                                              "sys_pll3_out", "clk_ext3",
+                                                              "audio_pll2_out", };
+
+static const char * const imx8mp_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m",
+                                                     "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_500m",
+                                                     "sys_pll2_333m", "sys_pll3_out", };
+
+static const char * const imx8mp_pcie2_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m",
+                                                    "clk_ext1", "clk_ext2", "clk_ext3",
+                                                    "clk_ext4", "sys_pll1_400m", };
+
+static const char * const imx8mp_media_mipi_test_byte_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m",
+                                                               "sys_pll3_out", "sys_pll2_100m",
+                                                               "sys_pll1_80m", "sys_pll1_160m",
+                                                               "sys_pll1_200m", };
+
+static const char * const imx8mp_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+                                                 "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+                                                 "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+                                              "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+                                              "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_vpu_vc8000e_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+                                                      "sys_pll2_1000m", "audio_pll2_out", "sys_pll2_125m",
+                                                      "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+                                               "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+                                               "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
+static struct clk_hw **hws;
+static struct clk_hw_onecell_data *clk_hw_data;
+
+static const int uart_clk_ids[] = {
+       IMX8MP_CLK_UART1_ROOT,
+       IMX8MP_CLK_UART2_ROOT,
+       IMX8MP_CLK_UART3_ROOT,
+       IMX8MP_CLK_UART4_ROOT,
+};
+static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
+
+static int imx8mp_clocks_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       void __iomem *anatop_base, *ccm_base;
+       int i;
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+       anatop_base = of_iomap(np, 0);
+       if (WARN_ON(!anatop_base))
+               return -ENOMEM;
+
+       np = dev->of_node;
+       ccm_base = devm_platform_ioremap_resource(pdev, 0);
+       if (WARN_ON(IS_ERR(ccm_base))) {
+               iounmap(anatop_base);
+               return PTR_ERR(ccm_base);
+       }
+
+       clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL);
+       if (WARN_ON(!clk_hw_data)) {
+               iounmap(anatop_base);
+               return -ENOMEM;
+       }
+
+       clk_hw_data->num = IMX8MP_CLK_END;
+       hws = clk_hw_data->hws;
+
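+       /* Fixed and external input clocks */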
+       hws[IMX8MP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+       hws[IMX8MP_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+       hws[IMX8MP_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+       hws[IMX8MP_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+       hws[IMX8MP_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+       hws[IMX8MP_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+       hws[IMX8MP_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
+
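+       /* PLL reference clock selects */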
+       hws[IMX8MP_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", anatop_base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", anatop_base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", anatop_base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", anatop_base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", anatop_base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", anatop_base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", anatop_base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_SYS_PLL1_REF_SEL] = imx_clk_hw_mux("sys_pll1_ref_sel", anatop_base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_SYS_PLL2_REF_SEL] = imx_clk_hw_mux("sys_pll2_ref_sel", anatop_base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MP_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", anatop_base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
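+       /* PLLs */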
+       hws[IMX8MP_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", anatop_base, &imx_1443x_pll);
+       hws[IMX8MP_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", anatop_base + 0x14, &imx_1443x_pll);
+       hws[IMX8MP_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", anatop_base + 0x28, &imx_1443x_pll);
+       hws[IMX8MP_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", anatop_base + 0x50, &imx_1443x_dram_pll);
+       hws[IMX8MP_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", anatop_base + 0x64, &imx_1416x_pll);
+       hws[IMX8MP_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", anatop_base + 0x74, &imx_1416x_pll);
+       hws[IMX8MP_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", anatop_base + 0x84, &imx_1416x_pll);
+       hws[IMX8MP_SYS_PLL1] = imx_clk_hw_pll14xx("sys_pll1", "sys_pll1_ref_sel", anatop_base + 0x94, &imx_1416x_pll);
+       hws[IMX8MP_SYS_PLL2] = imx_clk_hw_pll14xx("sys_pll2", "sys_pll2_ref_sel", anatop_base + 0x104, &imx_1416x_pll);
+       hws[IMX8MP_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", anatop_base + 0x114, &imx_1416x_pll);
+
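+       /* PLL bypass out */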
+       hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+
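+       /* PLL OUT GATE */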
+       hws[IMX8MP_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", anatop_base, 13);
+       hws[IMX8MP_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", anatop_base + 0x14, 13);
+       hws[IMX8MP_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", anatop_base + 0x28, 13);
+       hws[IMX8MP_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", anatop_base + 0x50, 13);
+       hws[IMX8MP_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", anatop_base + 0x64, 11);
+       hws[IMX8MP_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", anatop_base + 0x74, 11);
+       hws[IMX8MP_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", anatop_base + 0x84, 11);
+       hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
+       hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
+       hws[IMX8MP_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", anatop_base + 0x114, 11);
+
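+       /* SYS PLL1 fixed output */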
+       hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+       hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+       hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+       hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+       hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+       hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+       hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+       hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+       hws[IMX8MP_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+
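+       /* SYS PLL2 fixed output */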
+       hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+       hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+       hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+       hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+       hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+       hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+       hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+       hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+       hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+
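+       /* CORE */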
+       hws[IMX8MP_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", ccm_base + 0x8000, 24, 3, imx8mp_a53_sels, ARRAY_SIZE(imx8mp_a53_sels));
+       hws[IMX8MP_CLK_M7_SRC] = imx_clk_hw_mux2("arm_m7_src", ccm_base + 0x8080, 24, 3, imx8mp_m7_sels, ARRAY_SIZE(imx8mp_m7_sels));
+       hws[IMX8MP_CLK_ML_SRC] = imx_clk_hw_mux2("ml_src", ccm_base + 0x8100, 24, 3, imx8mp_ml_sels, ARRAY_SIZE(imx8mp_ml_sels));
+       hws[IMX8MP_CLK_GPU3D_CORE_SRC] = imx_clk_hw_mux2("gpu3d_core_src", ccm_base + 0x8180, 24, 3,  imx8mp_gpu3d_core_sels, ARRAY_SIZE(imx8mp_gpu3d_core_sels));
+       hws[IMX8MP_CLK_GPU3D_SHADER_SRC] = imx_clk_hw_mux2("gpu3d_shader_src", ccm_base + 0x8200, 24, 3, imx8mp_gpu3d_shader_sels, ARRAY_SIZE(imx8mp_gpu3d_shader_sels));
+       hws[IMX8MP_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", ccm_base + 0x8280, 24, 3, imx8mp_gpu2d_sels, ARRAY_SIZE(imx8mp_gpu2d_sels));
+       hws[IMX8MP_CLK_AUDIO_AXI_SRC] = imx_clk_hw_mux2("audio_axi_src", ccm_base + 0x8300, 24, 3, imx8mp_audio_axi_sels, ARRAY_SIZE(imx8mp_audio_axi_sels));
+       hws[IMX8MP_CLK_HSIO_AXI_SRC] = imx_clk_hw_mux2("hsio_axi_src", ccm_base + 0x8380, 24, 3, imx8mp_hsio_axi_sels, ARRAY_SIZE(imx8mp_hsio_axi_sels));
+       hws[IMX8MP_CLK_MEDIA_ISP_SRC] = imx_clk_hw_mux2("media_isp_src", ccm_base + 0x8400, 24, 3, imx8mp_media_isp_sels, ARRAY_SIZE(imx8mp_media_isp_sels));
+       hws[IMX8MP_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", ccm_base + 0x8000, 28);
+       hws[IMX8MP_CLK_M4_CG] = imx_clk_hw_gate3("arm_m7_cg", "arm_m7_src", ccm_base + 0x8080, 28);
+       hws[IMX8MP_CLK_ML_CG] = imx_clk_hw_gate3("ml_cg", "ml_src", ccm_base + 0x8100, 28);
+       hws[IMX8MP_CLK_GPU3D_CORE_CG] = imx_clk_hw_gate3("gpu3d_core_cg", "gpu3d_core_src", ccm_base + 0x8180, 28);
+       hws[IMX8MP_CLK_GPU3D_SHADER_CG] = imx_clk_hw_gate3("gpu3d_shader_cg", "gpu3d_shader_src", ccm_base + 0x8200, 28);
+       hws[IMX8MP_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", ccm_base + 0x8280, 28);
+       hws[IMX8MP_CLK_AUDIO_AXI_CG] = imx_clk_hw_gate3("audio_axi_cg", "audio_axi_src", ccm_base + 0x8300, 28);
+       hws[IMX8MP_CLK_HSIO_AXI_CG] = imx_clk_hw_gate3("hsio_axi_cg", "hsio_axi_src", ccm_base + 0x8380, 28);
+       hws[IMX8MP_CLK_MEDIA_ISP_CG] = imx_clk_hw_gate3("media_isp_cg", "media_isp_src", ccm_base + 0x8400, 28);
+       hws[IMX8MP_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", ccm_base + 0x8000, 0, 3);
+       hws[IMX8MP_CLK_M7_DIV] = imx_clk_hw_divider2("arm_m7_div", "arm_m7_cg", ccm_base + 0x8080, 0, 3);
+       hws[IMX8MP_CLK_ML_DIV] = imx_clk_hw_divider2("ml_div", "ml_cg", ccm_base + 0x8100, 0, 3);
+       hws[IMX8MP_CLK_GPU3D_CORE_DIV] = imx_clk_hw_divider2("gpu3d_core_div", "gpu3d_core_cg", ccm_base + 0x8180, 0, 3);
+       hws[IMX8MP_CLK_GPU3D_SHADER_DIV] = imx_clk_hw_divider2("gpu3d_shader_div", "gpu3d_shader_cg", ccm_base + 0x8200, 0, 3);
+       hws[IMX8MP_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", ccm_base + 0x8280, 0, 3);
+       hws[IMX8MP_CLK_AUDIO_AXI_DIV] = imx_clk_hw_divider2("audio_axi_div", "audio_axi_cg", ccm_base + 0x8300, 0, 3);
+       hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3);
+       hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3);
+
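+       /* BUS */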
+       hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
+       hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
+       hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
+       hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
+       hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
+       hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
+       hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
+       hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_apb_sels, ccm_base + 0x8b80);
+       hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
+       hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
+       hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
+       hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
+       hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
+       hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
+
+       hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
+       hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
+       hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
+
+       hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
+       hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1);
+
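+       /* IP clock slices */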
+       hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+       hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+       hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
+       hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
+       hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
+       hws[IMX8MP_CLK_CAN2] = imx8m_clk_hw_composite("can2", imx8mp_can2_sels, ccm_base + 0xa280);
+       hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite("memrepair", imx8mp_memrepair_sels, ccm_base + 0xa300);
+       hws[IMX8MP_CLK_PCIE_PHY] = imx8m_clk_hw_composite("pcie_phy", imx8mp_pcie_phy_sels, ccm_base + 0xa380);
+       hws[IMX8MP_CLK_PCIE_AUX] = imx8m_clk_hw_composite("pcie_aux", imx8mp_pcie_aux_sels, ccm_base + 0xa400);
+       hws[IMX8MP_CLK_I2C5] = imx8m_clk_hw_composite("i2c5", imx8mp_i2c5_sels, ccm_base + 0xa480);
+       hws[IMX8MP_CLK_I2C6] = imx8m_clk_hw_composite("i2c6", imx8mp_i2c6_sels, ccm_base + 0xa500);
+       hws[IMX8MP_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mp_sai1_sels, ccm_base + 0xa580);
+       hws[IMX8MP_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mp_sai2_sels, ccm_base + 0xa600);
+       hws[IMX8MP_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mp_sai3_sels, ccm_base + 0xa680);
+       hws[IMX8MP_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mp_sai4_sels, ccm_base + 0xa700);
+       hws[IMX8MP_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mp_sai5_sels, ccm_base + 0xa780);
+       hws[IMX8MP_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mp_sai6_sels, ccm_base + 0xa800);
+       hws[IMX8MP_CLK_ENET_QOS] = imx8m_clk_hw_composite("enet_qos", imx8mp_enet_qos_sels, ccm_base + 0xa880);
+       hws[IMX8MP_CLK_ENET_QOS_TIMER] = imx8m_clk_hw_composite("enet_qos_timer", imx8mp_enet_qos_timer_sels, ccm_base + 0xa900);
+       hws[IMX8MP_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mp_enet_ref_sels, ccm_base + 0xa980);
+       hws[IMX8MP_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mp_enet_timer_sels, ccm_base + 0xaa00);
+       hws[IMX8MP_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy_ref", imx8mp_enet_phy_ref_sels, ccm_base + 0xaa80);
+       hws[IMX8MP_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mp_nand_sels, ccm_base + 0xab00);
+       hws[IMX8MP_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mp_qspi_sels, ccm_base + 0xab80);
+       hws[IMX8MP_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mp_usdhc1_sels, ccm_base + 0xac00);
+       hws[IMX8MP_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mp_usdhc2_sels, ccm_base + 0xac80);
+       hws[IMX8MP_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mp_i2c1_sels, ccm_base + 0xad00);
+       hws[IMX8MP_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mp_i2c2_sels, ccm_base + 0xad80);
+       hws[IMX8MP_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mp_i2c3_sels, ccm_base + 0xae00);
+       hws[IMX8MP_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mp_i2c4_sels, ccm_base + 0xae80);
+
+       hws[IMX8MP_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mp_uart1_sels, ccm_base + 0xaf00);
+       hws[IMX8MP_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mp_uart2_sels, ccm_base + 0xaf80);
+       hws[IMX8MP_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mp_uart3_sels, ccm_base + 0xb000);
+       hws[IMX8MP_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mp_uart4_sels, ccm_base + 0xb080);
+       hws[IMX8MP_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mp_usb_core_ref_sels, ccm_base + 0xb100);
+       hws[IMX8MP_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mp_usb_phy_ref_sels, ccm_base + 0xb180);
+       hws[IMX8MP_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mp_gic_sels, ccm_base + 0xb200);
+       hws[IMX8MP_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mp_ecspi1_sels, ccm_base + 0xb280);
+       hws[IMX8MP_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mp_ecspi2_sels, ccm_base + 0xb300);
+       hws[IMX8MP_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mp_pwm1_sels, ccm_base + 0xb380);
+       hws[IMX8MP_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mp_pwm2_sels, ccm_base + 0xb400);
+       hws[IMX8MP_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mp_pwm3_sels, ccm_base + 0xb480);
+       hws[IMX8MP_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mp_pwm4_sels, ccm_base + 0xb500);
+
+       hws[IMX8MP_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mp_gpt1_sels, ccm_base + 0xb580);
+       hws[IMX8MP_CLK_GPT2] = imx8m_clk_hw_composite("gpt2", imx8mp_gpt2_sels, ccm_base + 0xb600);
+       hws[IMX8MP_CLK_GPT3] = imx8m_clk_hw_composite("gpt3", imx8mp_gpt3_sels, ccm_base + 0xb680);
+       hws[IMX8MP_CLK_GPT4] = imx8m_clk_hw_composite("gpt4", imx8mp_gpt4_sels, ccm_base + 0xb700);
+       hws[IMX8MP_CLK_GPT5] = imx8m_clk_hw_composite("gpt5", imx8mp_gpt5_sels, ccm_base + 0xb780);
+       hws[IMX8MP_CLK_GPT6] = imx8m_clk_hw_composite("gpt6", imx8mp_gpt6_sels, ccm_base + 0xb800);
+       hws[IMX8MP_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mp_wdog_sels, ccm_base + 0xb900);
+       hws[IMX8MP_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mp_wrclk_sels, ccm_base + 0xb980);
+       hws[IMX8MP_CLK_IPP_DO_CLKO1] = imx8m_clk_hw_composite("ipp_do_clko1", imx8mp_ipp_do_clko1_sels, ccm_base + 0xba00);
+       hws[IMX8MP_CLK_IPP_DO_CLKO2] = imx8m_clk_hw_composite("ipp_do_clko2", imx8mp_ipp_do_clko2_sels, ccm_base + 0xba80);
+       hws[IMX8MP_CLK_HDMI_FDCC_TST] = imx8m_clk_hw_composite("hdmi_fdcc_tst", imx8mp_hdmi_fdcc_tst_sels, ccm_base + 0xbb00);
+       hws[IMX8MP_CLK_HDMI_27M] = imx8m_clk_hw_composite("hdmi_27m", imx8mp_hdmi_27m_sels, ccm_base + 0xbb80);
+       hws[IMX8MP_CLK_HDMI_REF_266M] = imx8m_clk_hw_composite("hdmi_ref_266m", imx8mp_hdmi_ref_266m_sels, ccm_base + 0xbc00);
+       hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
+       hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
+       hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
+       hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00);
+       hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
+       hws[IMX8MP_CLK_MEDIA_MIPI_PHY2_REF] = imx8m_clk_hw_composite("media_mipi_phy2_ref", imx8mp_media_mipi_phy2_ref_sels, ccm_base + 0xbf00);
+       hws[IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC] = imx8m_clk_hw_composite("media_mipi_csi2_esc", imx8mp_media_mipi_csi2_esc_sels, ccm_base + 0xbf80);
+       hws[IMX8MP_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mp_pcie2_ctrl_sels, ccm_base + 0xc000);
+       hws[IMX8MP_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mp_pcie2_phy_sels, ccm_base + 0xc080);
+       hws[IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE] = imx8m_clk_hw_composite("media_mipi_test_byte", imx8mp_media_mipi_test_byte_sels, ccm_base + 0xc100);
+       hws[IMX8MP_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mp_ecspi3_sels, ccm_base + 0xc180);
+       hws[IMX8MP_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mp_pdm_sels, ccm_base + 0xc200);
+       hws[IMX8MP_CLK_VPU_VC8000E] = imx8m_clk_hw_composite("vpu_vc8000e", imx8mp_vpu_vc8000e_sels, ccm_base + 0xc280);
+       hws[IMX8MP_CLK_SAI7] = imx8m_clk_hw_composite("sai7", imx8mp_sai7_sels, ccm_base + 0xc300);
+
+       hws[IMX8MP_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+       hws[IMX8MP_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", ccm_base + 0x9800, 24, 1, imx8mp_dram_core_sels, ARRAY_SIZE(imx8mp_dram_core_sels), CLK_IS_CRITICAL);
+
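+       /* CCGR clock gates */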
+       hws[IMX8MP_CLK_DRAM1_ROOT] = imx_clk_hw_gate4_flags("dram1_root_clk", "dram_core_clk", ccm_base + 0x4050, 0, CLK_IS_CRITICAL);
+       hws[IMX8MP_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", ccm_base + 0x4070, 0);
+       hws[IMX8MP_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", ccm_base + 0x4080, 0);
+       hws[IMX8MP_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", ccm_base + 0x4090, 0);
+       hws[IMX8MP_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", ccm_base + 0x40a0, 0);
+       hws[IMX8MP_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", ccm_base + 0x40b0, 0);
+       hws[IMX8MP_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", ccm_base + 0x40c0, 0);
+       hws[IMX8MP_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", ccm_base + 0x40d0, 0);
+       hws[IMX8MP_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", ccm_base + 0x40e0, 0);
+       hws[IMX8MP_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", ccm_base + 0x40f0, 0);
+       hws[IMX8MP_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", ccm_base + 0x4100, 0);
+       hws[IMX8MP_CLK_GPT2_ROOT] = imx_clk_hw_gate4("gpt2_root_clk", "gpt2", ccm_base + 0x4110, 0);
+       hws[IMX8MP_CLK_GPT3_ROOT] = imx_clk_hw_gate4("gpt3_root_clk", "gpt3", ccm_base + 0x4120, 0);
+       hws[IMX8MP_CLK_GPT4_ROOT] = imx_clk_hw_gate4("gpt4_root_clk", "gpt4", ccm_base + 0x4130, 0);
+       hws[IMX8MP_CLK_GPT5_ROOT] = imx_clk_hw_gate4("gpt5_root_clk", "gpt5", ccm_base + 0x4140, 0);
+       hws[IMX8MP_CLK_GPT6_ROOT] = imx_clk_hw_gate4("gpt6_root_clk", "gpt6", ccm_base + 0x4150, 0);
+       hws[IMX8MP_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", ccm_base + 0x4170, 0);
+       hws[IMX8MP_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", ccm_base + 0x4180, 0);
+       hws[IMX8MP_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", ccm_base + 0x4190, 0);
+       hws[IMX8MP_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", ccm_base + 0x41a0, 0);
+       hws[IMX8MP_CLK_PCIE_ROOT] = imx_clk_hw_gate4("pcie_root_clk", "pcie_aux", ccm_base + 0x4250, 0);
+       hws[IMX8MP_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", ccm_base + 0x4280, 0);
+       hws[IMX8MP_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", ccm_base + 0x4290, 0);
+       hws[IMX8MP_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", ccm_base + 0x42a0, 0);
+       hws[IMX8MP_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", ccm_base + 0x42b0, 0);
+       hws[IMX8MP_CLK_QOS_ROOT] = imx_clk_hw_gate4("qos_root_clk", "ipg_root", ccm_base + 0x42c0, 0);
+       hws[IMX8MP_CLK_QOS_ENET_ROOT] = imx_clk_hw_gate4("qos_enet_root_clk", "ipg_root", ccm_base + 0x42e0, 0);
+       hws[IMX8MP_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", ccm_base + 0x42f0, 0);
+       hws[IMX8MP_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", ccm_base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", ccm_base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MP_CLK_I2C5_ROOT] = imx_clk_hw_gate2("i2c5_root_clk", "i2c5", ccm_base + 0x4330, 0);
+       hws[IMX8MP_CLK_I2C6_ROOT] = imx_clk_hw_gate2("i2c6_root_clk", "i2c6", ccm_base + 0x4340, 0);
+       hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0);
+       hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0);
+       hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
+       hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "enet_axi", ccm_base + 0x43b0, 0);
+       hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
+       hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0);
+       hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0);
+       hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0);
+       hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
+       hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
+       hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
+       hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
+       hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0);
+       hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
+       hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
+       hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
+       hws[IMX8MP_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", ccm_base + 0x4530, 0);
+       hws[IMX8MP_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", ccm_base + 0x4540, 0);
+       hws[IMX8MP_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", ccm_base + 0x4550, 0);
+       hws[IMX8MP_CLK_VPU_G1_ROOT] = imx_clk_hw_gate4("vpu_g1_root_clk", "vpu_g1", ccm_base + 0x4560, 0);
+       hws[IMX8MP_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", ccm_base + 0x4570, 0);
+       hws[IMX8MP_CLK_VPU_VC8KE_ROOT] = imx_clk_hw_gate4("vpu_vc8ke_root_clk", "vpu_vc8000e", ccm_base + 0x4590, 0);
+       hws[IMX8MP_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", ccm_base + 0x45a0, 0);
+       hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_div", ccm_base + 0x45b0, 0);
+       hws[IMX8MP_CLK_HSIO_ROOT] = imx_clk_hw_gate4("hsio_root_clk", "ipg_root", ccm_base + 0x45c0, 0);
+       hws[IMX8MP_CLK_MEDIA_APB_ROOT] = imx_clk_hw_gate2_shared2("media_apb_root_clk", "media_apb", ccm_base + 0x45d0, 0, &share_count_media);
+       hws[IMX8MP_CLK_MEDIA_AXI_ROOT] = imx_clk_hw_gate2_shared2("media_axi_root_clk", "media_axi", ccm_base + 0x45d0, 0, &share_count_media);
+       hws[IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam1_pix_root_clk", "media_cam1_pix", ccm_base + 0x45d0, 0, &share_count_media);
+       hws[IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam2_pix_root_clk", "media_cam2_pix", ccm_base + 0x45d0, 0, &share_count_media);
+       hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media);
+       hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media);
+       hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp_div", ccm_base + 0x45d0, 0, &share_count_media);
+
+       hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0);
+       hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0);
+       hws[IMX8MP_CLK_TSENSOR_ROOT] = imx_clk_hw_gate4("tsensor_root_clk", "ipg_root", ccm_base + 0x4620, 0);
+       hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0);
+       hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0);
+
+       hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+                                            hws[IMX8MP_CLK_A53_DIV]->clk,
+                                            hws[IMX8MP_CLK_A53_SRC]->clk,
+                                            hws[IMX8MP_ARM_PLL_OUT]->clk,
+                                            hws[IMX8MP_SYS_PLL1_800M]->clk);
+
+       imx_check_clk_hws(hws, IMX8MP_CLK_END);
+
+       of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+
+       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+               int index = uart_clk_ids[i];
+
+               uart_clks[i] = &hws[index]->clk;
+       }
+
+       imx_register_uart_clocks(uart_clks);
+
+       return 0;
+}
+
+static const struct of_device_id imx8mp_clk_of_match[] = {
+       { .compatible = "fsl,imx8mp-ccm" },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_clk_of_match);
+
+static struct platform_driver imx8mp_clk_driver = {
+       .probe = imx8mp_clocks_probe,
+       .driver = {
+               .name = "imx8mp-ccm",
+               /*
+                * Disable bind attributes: clocks are not removed and
+                * reloading the driver will crash or break devices.
+                */
+               .suppress_bind_attrs = true,
+               .of_match_table = of_match_ptr(imx8mp_clk_of_match),
+       },
+};
+module_platform_driver(imx8mp_clk_driver);
index 5f10a60..4c0edca 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/types.h>
+#include <linux/slab.h>
 #include <linux/platform_device.h>
 
 #include "clk.h"
@@ -24,8 +25,6 @@ static u32 share_count_sai6;
 static u32 share_count_dcss;
 static u32 share_count_nand;
 
-static struct clk *clks[IMX8MQ_CLK_END];
-
 static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
 static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
 static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
@@ -269,124 +268,133 @@ static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "os
 static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
                                          "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "ckil", };
 
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
 
-static struct clk ** const uart_clks[] = {
-       &clks[IMX8MQ_CLK_UART1_ROOT],
-       &clks[IMX8MQ_CLK_UART2_ROOT],
-       &clks[IMX8MQ_CLK_UART3_ROOT],
-       &clks[IMX8MQ_CLK_UART4_ROOT],
-       NULL
+static const int uart_clk_ids[] = {
+       IMX8MQ_CLK_UART1_ROOT,
+       IMX8MQ_CLK_UART2_ROOT,
+       IMX8MQ_CLK_UART3_ROOT,
+       IMX8MQ_CLK_UART4_ROOT,
 };
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
 
 static int imx8mq_clocks_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        void __iomem *base;
-       int err;
+       int err, i;
+
+       clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+                                         IMX8MQ_CLK_END), GFP_KERNEL);
+       if (WARN_ON(!clk_hw_data))
+               return -ENOMEM;
+
+       clk_hw_data->num = IMX8MQ_CLK_END;
+       hws = clk_hw_data->hws;
 
-       clks[IMX8MQ_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
-       clks[IMX8MQ_CLK_32K] = of_clk_get_by_name(np, "ckil");
-       clks[IMX8MQ_CLK_25M] = of_clk_get_by_name(np, "osc_25m");
-       clks[IMX8MQ_CLK_27M] = of_clk_get_by_name(np, "osc_27m");
-       clks[IMX8MQ_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
-       clks[IMX8MQ_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
-       clks[IMX8MQ_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
-       clks[IMX8MQ_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+       hws[IMX8MQ_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+       hws[IMX8MQ_CLK_32K] = imx_obtain_fixed_clk_hw(np, "ckil");
+       hws[IMX8MQ_CLK_25M] = imx_obtain_fixed_clk_hw(np, "osc_25m");
+       hws[IMX8MQ_CLK_27M] = imx_obtain_fixed_clk_hw(np, "osc_27m");
+       hws[IMX8MQ_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+       hws[IMX8MQ_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+       hws[IMX8MQ_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+       hws[IMX8MQ_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
        base = of_iomap(np, 0);
        if (WARN_ON(!base))
                return -ENOMEM;
 
-       clks[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x20, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_SYS3_PLL1_REF_SEL]  = imx_clk_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_DRAM_PLL1_REF_SEL]  = imx_clk_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-       clks[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-
-       clks[IMX8MQ_ARM_PLL_REF_DIV]    = imx_clk_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
-       clks[IMX8MQ_GPU_PLL_REF_DIV]    = imx_clk_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
-       clks[IMX8MQ_VPU_PLL_REF_DIV]    = imx_clk_divider("vpu_pll_ref_div", "vpu_pll_ref_sel", base + 0x20, 5, 6);
-       clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
-       clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
-       clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
-
-       clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
-       clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
-       clks[IMX8MQ_VPU_PLL] = imx_clk_frac_pll("vpu_pll", "vpu_pll_ref_div", base + 0x20);
-       clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
-       clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
-       clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
+       hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x20, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_SYS3_PLL1_REF_SEL]   = imx_clk_hw_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_DRAM_PLL1_REF_SEL]   = imx_clk_hw_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_hw_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+       hws[IMX8MQ_ARM_PLL_REF_DIV]     = imx_clk_hw_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
+       hws[IMX8MQ_GPU_PLL_REF_DIV]     = imx_clk_hw_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
+       hws[IMX8MQ_VPU_PLL_REF_DIV]     = imx_clk_hw_divider("vpu_pll_ref_div", "vpu_pll_ref_sel", base + 0x20, 5, 6);
+       hws[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_hw_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
+       hws[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_hw_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
+       hws[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_hw_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
+
+       hws[IMX8MQ_ARM_PLL] = imx_clk_hw_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
+       hws[IMX8MQ_GPU_PLL] = imx_clk_hw_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
+       hws[IMX8MQ_VPU_PLL] = imx_clk_hw_frac_pll("vpu_pll", "vpu_pll_ref_div", base + 0x20);
+       hws[IMX8MQ_AUDIO_PLL1] = imx_clk_hw_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
+       hws[IMX8MQ_AUDIO_PLL2] = imx_clk_hw_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
+       hws[IMX8MQ_VIDEO_PLL1] = imx_clk_hw_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
 
        /* PLL bypass out */
-       clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
-       clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
-       clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
-       clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
-       clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
-       clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
+       hws[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_hw_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
+       hws[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_hw_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
+       hws[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
+       hws[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
+       hws[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
 
        /* PLL OUT GATE */
-       clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
-       clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
-       clks[IMX8MQ_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x20, 21);
-       clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
-       clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
-       clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
-
-       clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_fixed("sys1_pll_out", 800000000);
-       clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_fixed("sys2_pll_out", 1000000000);
-       clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
-       clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
-       clks[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_sccg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
+       hws[IMX8MQ_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
+       hws[IMX8MQ_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
+       hws[IMX8MQ_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x20, 21);
+       hws[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
+       hws[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
+       hws[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
+
+       hws[IMX8MQ_SYS1_PLL_OUT] = imx_clk_hw_fixed("sys1_pll_out", 800000000);
+       hws[IMX8MQ_SYS2_PLL_OUT] = imx_clk_hw_fixed("sys2_pll_out", 1000000000);
+       hws[IMX8MQ_SYS3_PLL_OUT] = imx_clk_hw_sscg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
+       hws[IMX8MQ_DRAM_PLL_OUT] = imx_clk_hw_sscg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+       hws[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_hw_sscg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
 
        /* SYS PLL1 fixed output */
-       clks[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
-       clks[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
-       clks[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
-       clks[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
-       clks[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
-       clks[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
-       clks[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
-       clks[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
-       clks[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
-
-       clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
-       clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
-       clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
-       clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
-       clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
-       clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
-       clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
-       clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
-       clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
+       hws[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_hw_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
+       hws[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_hw_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
+       hws[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_hw_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
+       hws[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_hw_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
+       hws[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_hw_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
+       hws[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_hw_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
+       hws[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_hw_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
+       hws[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_hw_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
+       hws[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_hw_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
+
+       hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
+       hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
+       hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
+       hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
+       hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
+       hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
+       hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
+       hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
+       hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
 
        /* SYS PLL2 fixed output */
-       clks[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
-       clks[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
-       clks[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
-       clks[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
-       clks[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
-       clks[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
-       clks[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
-       clks[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
-       clks[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
-
-       clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
-       clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
-       clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
-       clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
-       clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
-       clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
-       clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
-       clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
-       clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
+       hws[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_hw_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
+       hws[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_hw_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
+       hws[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_hw_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
+       hws[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_hw_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
+       hws[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_hw_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
+       hws[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_hw_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
+       hws[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_hw_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
+       hws[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_hw_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
+       hws[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_hw_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
+
+       hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
+       hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
+       hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
+       hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
+       hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
+       hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
+       hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
+       hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
+       hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
 
        np = dev->of_node;
        base = devm_platform_ioremap_resource(pdev, 0);
@@ -394,206 +402,213 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
                return PTR_ERR(base);
 
        /* CORE */
-       clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
-       clks[IMX8MQ_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
-       clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
-       clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
-       clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels,  ARRAY_SIZE(imx8mq_gpu_shader_sels));
-
-       clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
-       clks[IMX8MQ_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
-       clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
-       clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
-       clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
-
-       clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
-       clks[IMX8MQ_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
-       clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
-       clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
-       clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+       hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
+       hws[IMX8MQ_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
+       hws[IMX8MQ_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
+       hws[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
+       hws[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels,  ARRAY_SIZE(imx8mq_gpu_shader_sels));
+
+       hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
+       hws[IMX8MQ_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+       hws[IMX8MQ_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+       hws[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+       hws[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+
+       hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+       hws[IMX8MQ_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+       hws[IMX8MQ_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+       hws[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+       hws[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
 
        /* BUS */
-       clks[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
-       clks[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
-       clks[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
-       clks[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
-       clks[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
-       clks[IMX8MQ_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
-       clks[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
-       clks[IMX8MQ_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
-       clks[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
-       clks[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
-       clks[IMX8MQ_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
-       clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+       hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
+       hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
+       hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
+       hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
+       hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
+       hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
+       hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
+       hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
+       hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
+       hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
+       hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
+       hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
 
        /* AHB */
        /* AHB clock is used by the AHB bus therefore marked as critical */
-       clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
-       clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+       hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+       hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
 
        /* IPG */
-       clks[IMX8MQ_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
-       clks[IMX8MQ_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+       hws[IMX8MQ_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+       hws[IMX8MQ_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+       /*
+        * DRAM clocks are manipulated by TF-A outside the clock framework.
+        * Mark them with CLK_GET_RATE_NOCACHE to always read the divider from hardware.
+        */
+       hws[IMX8MQ_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
+       hws[IMX8MQ_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+       hws[IMX8MQ_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mq_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
 
        /* IP */
-       clks[IMX8MQ_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
-
-       clks[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
-       clks[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
-       clks[IMX8MQ_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
-       clks[IMX8MQ_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mq_vpu_g2_sels, base + 0xa180);
-       clks[IMX8MQ_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mq_disp_dtrc_sels, base + 0xa200);
-       clks[IMX8MQ_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mq_disp_dc8000_sels, base + 0xa280);
-       clks[IMX8MQ_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mq_pcie1_ctrl_sels, base + 0xa300);
-       clks[IMX8MQ_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mq_pcie1_phy_sels, base + 0xa380);
-       clks[IMX8MQ_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mq_pcie1_aux_sels, base + 0xa400);
-       clks[IMX8MQ_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mq_dc_pixel_sels, base + 0xa480);
-       clks[IMX8MQ_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mq_lcdif_pixel_sels, base + 0xa500);
-       clks[IMX8MQ_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mq_sai1_sels, base + 0xa580);
-       clks[IMX8MQ_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mq_sai2_sels, base + 0xa600);
-       clks[IMX8MQ_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mq_sai3_sels, base + 0xa680);
-       clks[IMX8MQ_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mq_sai4_sels, base + 0xa700);
-       clks[IMX8MQ_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mq_sai5_sels, base + 0xa780);
-       clks[IMX8MQ_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mq_sai6_sels, base + 0xa800);
-       clks[IMX8MQ_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mq_spdif1_sels, base + 0xa880);
-       clks[IMX8MQ_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mq_spdif2_sels, base + 0xa900);
-       clks[IMX8MQ_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mq_enet_ref_sels, base + 0xa980);
-       clks[IMX8MQ_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mq_enet_timer_sels, base + 0xaa00);
-       clks[IMX8MQ_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mq_enet_phy_sels, base + 0xaa80);
-       clks[IMX8MQ_CLK_NAND] = imx8m_clk_composite("nand", imx8mq_nand_sels, base + 0xab00);
-       clks[IMX8MQ_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mq_qspi_sels, base + 0xab80);
-       clks[IMX8MQ_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mq_usdhc1_sels, base + 0xac00);
-       clks[IMX8MQ_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mq_usdhc2_sels, base + 0xac80);
-       clks[IMX8MQ_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mq_i2c1_sels, base + 0xad00);
-       clks[IMX8MQ_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mq_i2c2_sels, base + 0xad80);
-       clks[IMX8MQ_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mq_i2c3_sels, base + 0xae00);
-       clks[IMX8MQ_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mq_i2c4_sels, base + 0xae80);
-       clks[IMX8MQ_CLK_UART1] = imx8m_clk_composite("uart1", imx8mq_uart1_sels, base + 0xaf00);
-       clks[IMX8MQ_CLK_UART2] = imx8m_clk_composite("uart2", imx8mq_uart2_sels, base + 0xaf80);
-       clks[IMX8MQ_CLK_UART3] = imx8m_clk_composite("uart3", imx8mq_uart3_sels, base + 0xb000);
-       clks[IMX8MQ_CLK_UART4] = imx8m_clk_composite("uart4", imx8mq_uart4_sels, base + 0xb080);
-       clks[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100);
-       clks[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180);
-       clks[IMX8MQ_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mq_gic_sels, base + 0xb200);
-       clks[IMX8MQ_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280);
-       clks[IMX8MQ_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300);
-       clks[IMX8MQ_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380);
-       clks[IMX8MQ_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mq_pwm2_sels, base + 0xb400);
-       clks[IMX8MQ_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mq_pwm3_sels, base + 0xb480);
-       clks[IMX8MQ_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mq_pwm4_sels, base + 0xb500);
-       clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
-       clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
-       clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
-       clks[IMX8MQ_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
-       clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
-       clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
-       clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
-       clks[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
-       clks[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
-       clks[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
-       clks[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
-       clks[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
-       clks[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
-       clks[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
-       clks[IMX8MQ_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mq_csi2_core_sels, base + 0xbe80);
-       clks[IMX8MQ_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mq_csi2_phy_sels, base + 0xbf00);
-       clks[IMX8MQ_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mq_csi2_esc_sels, base + 0xbf80);
-       clks[IMX8MQ_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mq_pcie2_ctrl_sels, base + 0xc000);
-       clks[IMX8MQ_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mq_pcie2_phy_sels, base + 0xc080);
-       clks[IMX8MQ_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mq_pcie2_aux_sels, base + 0xc100);
-       clks[IMX8MQ_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mq_ecspi3_sels, base + 0xc180);
-
-       clks[IMX8MQ_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
-       clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
-       clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
-       clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
-       clks[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
-       clks[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
-       clks[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
-       clks[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
-       clks[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
-       clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
-       clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
-       clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
-       clks[IMX8MQ_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
-       clks[IMX8MQ_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
-       clks[IMX8MQ_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
-       clks[IMX8MQ_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
-       clks[IMX8MQ_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
-       clks[IMX8MQ_CLK_PCIE2_ROOT] = imx_clk_gate4("pcie2_root_clk", "pcie2_ctrl", base + 0x4640, 0);
-       clks[IMX8MQ_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
-       clks[IMX8MQ_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
-       clks[IMX8MQ_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
-       clks[IMX8MQ_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
-       clks[IMX8MQ_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
-       clks[IMX8MQ_CLK_RAWNAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
-       clks[IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
-       clks[IMX8MQ_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
-       clks[IMX8MQ_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
-       clks[IMX8MQ_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
-       clks[IMX8MQ_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_root", base + 0x4340, 0, &share_count_sai2);
-       clks[IMX8MQ_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
-       clks[IMX8MQ_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_root", base + 0x4350, 0, &share_count_sai3);
-       clks[IMX8MQ_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
-       clks[IMX8MQ_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
-       clks[IMX8MQ_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
-       clks[IMX8MQ_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
-       clks[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
-       clks[IMX8MQ_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
-       clks[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
-       clks[IMX8MQ_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
-       clks[IMX8MQ_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
-       clks[IMX8MQ_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
-       clks[IMX8MQ_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
-       clks[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
-       clks[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_gate4("usb2_ctrl_root_clk", "usb_bus", base + 0x44e0, 0);
-       clks[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0);
-       clks[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0);
-       clks[IMX8MQ_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
-       clks[IMX8MQ_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
-       clks[IMX8MQ_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
-       clks[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
-       clks[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
-       clks[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
-       clks[IMX8MQ_CLK_GPU_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
-       clks[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
-       clks[IMX8MQ_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
-       clks[IMX8MQ_CLK_DISP_AXI_ROOT]  = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
-       clks[IMX8MQ_CLK_DISP_APB_ROOT]  = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
-       clks[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
-       clks[IMX8MQ_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
-       clks[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
-       clks[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
-       clks[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0);
-       clks[IMX8MQ_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
-       clks[IMX8MQ_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
-
-       clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8);
-       clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
-
-       clks[IMX8MQ_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
-                                          clks[IMX8MQ_CLK_A53_DIV],
-                                          clks[IMX8MQ_CLK_A53_SRC],
-                                          clks[IMX8MQ_ARM_PLL_OUT],
-                                          clks[IMX8MQ_SYS1_PLL_800M]);
-
-       imx_check_clocks(clks, ARRAY_SIZE(clks));
-
-       clk_data.clks = clks;
-       clk_data.clk_num = ARRAY_SIZE(clks);
-
-       err = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       hws[IMX8MQ_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
+       hws[IMX8MQ_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mq_vpu_g2_sels, base + 0xa180);
+       hws[IMX8MQ_CLK_DISP_DTRC] = imx8m_clk_hw_composite("disp_dtrc", imx8mq_disp_dtrc_sels, base + 0xa200);
+       hws[IMX8MQ_CLK_DISP_DC8000] = imx8m_clk_hw_composite("disp_dc8000", imx8mq_disp_dc8000_sels, base + 0xa280);
+       hws[IMX8MQ_CLK_PCIE1_CTRL] = imx8m_clk_hw_composite("pcie1_ctrl", imx8mq_pcie1_ctrl_sels, base + 0xa300);
+       hws[IMX8MQ_CLK_PCIE1_PHY] = imx8m_clk_hw_composite("pcie1_phy", imx8mq_pcie1_phy_sels, base + 0xa380);
+       hws[IMX8MQ_CLK_PCIE1_AUX] = imx8m_clk_hw_composite("pcie1_aux", imx8mq_pcie1_aux_sels, base + 0xa400);
+       hws[IMX8MQ_CLK_DC_PIXEL] = imx8m_clk_hw_composite("dc_pixel", imx8mq_dc_pixel_sels, base + 0xa480);
+       hws[IMX8MQ_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite("lcdif_pixel", imx8mq_lcdif_pixel_sels, base + 0xa500);
+       hws[IMX8MQ_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mq_sai1_sels, base + 0xa580);
+       hws[IMX8MQ_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mq_sai2_sels, base + 0xa600);
+       hws[IMX8MQ_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mq_sai3_sels, base + 0xa680);
+       hws[IMX8MQ_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mq_sai4_sels, base + 0xa700);
+       hws[IMX8MQ_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mq_sai5_sels, base + 0xa780);
+       hws[IMX8MQ_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mq_sai6_sels, base + 0xa800);
+       hws[IMX8MQ_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mq_spdif1_sels, base + 0xa880);
+       hws[IMX8MQ_CLK_SPDIF2] = imx8m_clk_hw_composite("spdif2", imx8mq_spdif2_sels, base + 0xa900);
+       hws[IMX8MQ_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mq_enet_ref_sels, base + 0xa980);
+       hws[IMX8MQ_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mq_enet_timer_sels, base + 0xaa00);
+       hws[IMX8MQ_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mq_enet_phy_sels, base + 0xaa80);
+       hws[IMX8MQ_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mq_nand_sels, base + 0xab00);
+       hws[IMX8MQ_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mq_qspi_sels, base + 0xab80);
+       hws[IMX8MQ_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mq_usdhc1_sels, base + 0xac00);
+       hws[IMX8MQ_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mq_usdhc2_sels, base + 0xac80);
+       hws[IMX8MQ_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mq_i2c1_sels, base + 0xad00);
+       hws[IMX8MQ_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mq_i2c2_sels, base + 0xad80);
+       hws[IMX8MQ_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mq_i2c3_sels, base + 0xae00);
+       hws[IMX8MQ_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mq_i2c4_sels, base + 0xae80);
+       hws[IMX8MQ_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mq_uart1_sels, base + 0xaf00);
+       hws[IMX8MQ_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mq_uart2_sels, base + 0xaf80);
+       hws[IMX8MQ_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mq_uart3_sels, base + 0xb000);
+       hws[IMX8MQ_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mq_uart4_sels, base + 0xb080);
+       hws[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100);
+       hws[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180);
+       hws[IMX8MQ_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mq_gic_sels, base + 0xb200);
+       hws[IMX8MQ_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280);
+       hws[IMX8MQ_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300);
+       hws[IMX8MQ_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380);
+       hws[IMX8MQ_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mq_pwm2_sels, base + 0xb400);
+       hws[IMX8MQ_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mq_pwm3_sels, base + 0xb480);
+       hws[IMX8MQ_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mq_pwm4_sels, base + 0xb500);
+       hws[IMX8MQ_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
+       hws[IMX8MQ_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
+       hws[IMX8MQ_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
+       hws[IMX8MQ_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
+       hws[IMX8MQ_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
+       hws[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
+       hws[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
+       hws[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
+       hws[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_hw_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
+       hws[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_hw_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
+       hws[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_hw_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
+       hws[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_hw_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
+       hws[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
+       hws[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_hw_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
+       hws[IMX8MQ_CLK_CSI2_CORE] = imx8m_clk_hw_composite("csi2_core", imx8mq_csi2_core_sels, base + 0xbe80);
+       hws[IMX8MQ_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mq_csi2_phy_sels, base + 0xbf00);
+       hws[IMX8MQ_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mq_csi2_esc_sels, base + 0xbf80);
+       hws[IMX8MQ_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mq_pcie2_ctrl_sels, base + 0xc000);
+       hws[IMX8MQ_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mq_pcie2_phy_sels, base + 0xc080);
+       hws[IMX8MQ_CLK_PCIE2_AUX] = imx8m_clk_hw_composite("pcie2_aux", imx8mq_pcie2_aux_sels, base + 0xc100);
+       hws[IMX8MQ_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mq_ecspi3_sels, base + 0xc180);
+
+       hws[IMX8MQ_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+       hws[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+       hws[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+       hws[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+       hws[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+       hws[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+       hws[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+       hws[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+       hws[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+       hws[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+       hws[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+       hws[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+       hws[IMX8MQ_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+       hws[IMX8MQ_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+       hws[IMX8MQ_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+       hws[IMX8MQ_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+       hws[IMX8MQ_CLK_PCIE1_ROOT] = imx_clk_hw_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+       hws[IMX8MQ_CLK_PCIE2_ROOT] = imx_clk_hw_gate4("pcie2_root_clk", "pcie2_ctrl", base + 0x4640, 0);
+       hws[IMX8MQ_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+       hws[IMX8MQ_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+       hws[IMX8MQ_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+       hws[IMX8MQ_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+       hws[IMX8MQ_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+       hws[IMX8MQ_CLK_RAWNAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+       hws[IMX8MQ_CLK_SAI1_ROOT] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+       hws[IMX8MQ_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+       hws[IMX8MQ_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+       hws[IMX8MQ_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_root", base + 0x4340, 0, &share_count_sai2);
+       hws[IMX8MQ_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+       hws[IMX8MQ_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_root", base + 0x4350, 0, &share_count_sai3);
+       hws[IMX8MQ_CLK_SAI4_ROOT] = imx_clk_hw_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+       hws[IMX8MQ_CLK_SAI4_IPG] = imx_clk_hw_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+       hws[IMX8MQ_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+       hws[IMX8MQ_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+       hws[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+       hws[IMX8MQ_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+       hws[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
+       hws[IMX8MQ_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+       hws[IMX8MQ_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+       hws[IMX8MQ_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+       hws[IMX8MQ_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+       hws[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+       hws[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_hw_gate4("usb2_ctrl_root_clk", "usb_bus", base + 0x44e0, 0);
+       hws[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_hw_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0);
+       hws[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_hw_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0);
+       hws[IMX8MQ_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+       hws[IMX8MQ_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+       hws[IMX8MQ_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+       hws[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+       hws[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+       hws[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_hw_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+       hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
+       hws[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_hw_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+       hws[IMX8MQ_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
+       hws[IMX8MQ_CLK_DISP_AXI_ROOT]  = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
+       hws[IMX8MQ_CLK_DISP_APB_ROOT]  = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
+       hws[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_hw_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
+       hws[IMX8MQ_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+       hws[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_hw_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+       hws[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+       hws[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_hw_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0);
+       hws[IMX8MQ_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+       hws[IMX8MQ_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+
+       hws[IMX8MQ_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc_25m", 1, 8);
+       hws[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+
+       hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+                                          hws[IMX8MQ_CLK_A53_DIV]->clk,
+                                          hws[IMX8MQ_CLK_A53_SRC]->clk,
+                                          hws[IMX8MQ_ARM_PLL_OUT]->clk,
+                                          hws[IMX8MQ_SYS1_PLL_800M]->clk);
+
+       imx_check_clk_hws(hws, IMX8MQ_CLK_END);
+
+       err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
        if (err < 0) {
-               dev_err(dev, "failed to register clks for i.MX8MQ\n");
-               goto unregister_clks;
+               dev_err(dev, "failed to register hws for i.MX8MQ\n");
+               goto unregister_hws;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+               int index = uart_clk_ids[i];
+
+               uart_hws[i] = &hws[index]->clk;
        }
 
-       imx_register_uart_clocks(uart_clks);
+       imx_register_uart_clocks(uart_hws);
 
        return 0;
 
-unregister_clks:
-       imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+       imx_unregister_hw_clocks(hws, IMX8MQ_CLK_END);
 
        return err;
 }
@@ -609,6 +624,11 @@ static struct platform_driver imx8mq_clk_driver = {
        .probe = imx8mq_clocks_probe,
        .driver = {
                .name = "imx8mq-ccm",
+               /*
+                * Disable bind attributes: the clocks are never removed, so
+                * unbinding and reloading the driver will crash or break devices.
+                */
+               .suppress_bind_attrs = true,
                .of_match_table = of_match_ptr(imx8mq_clk_of_match),
        },
 };
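
A minimal sketch of the clk_hw provider pattern the i.MX8MQ probe above now follows: a clk_hw_onecell_data table is filled with struct clk_hw pointers and exposed through of_clk_add_hw_provider(). This is not code from the patch; foo_clocks_probe() and FOO_CLK_END are hypothetical names and error handling is trimmed.

/* Sketch only, assumed names; mirrors the clk_hw-based provider pattern. */
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define FOO_CLK_END	2	/* number of exported clocks (assumed) */

static int foo_clocks_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk_hw_onecell_data *clk_hw_data;
	struct clk_hw **hws;

	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
						    FOO_CLK_END), GFP_KERNEL);
	if (!clk_hw_data)
		return -ENOMEM;

	clk_hw_data->num = FOO_CLK_END;
	hws = clk_hw_data->hws;

	/* Constructors hand back struct clk_hw rather than struct clk. */
	hws[0] = clk_hw_register_fixed_rate(dev, "foo_osc", NULL, 0, 24000000);
	hws[1] = clk_hw_register_fixed_factor(dev, "foo_osc_div2", "foo_osc",
					      0, 1, 2);

	/* of_clk_hw_onecell_get() serves consumers with #clock-cells = <1>. */
	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
				      clk_hw_data);
}
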
index c0aff7c..04c8ee3 100644 (file)
@@ -173,6 +173,17 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
        if (!ss_lpcg)
                return -ENODEV;
 
+       /*
+        * Please don't replace this with devm_platform_ioremap_resource.
+        *
+        * devm_platform_ioremap_resource calls devm_ioremap_resource which
+        * differs from devm_ioremap by also calling devm_request_mem_region
+        * and preventing other mappings in the same area.
+        *
+        * On imx8 the LPCG nodes map entire subsystems and overlap
+        * peripherals, so using devm_platform_ioremap_resource would cause
+        * many devices, including serial ports, to fail to probe.
+        */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;
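
The comment above hinges on the difference between devm_ioremap() and devm_platform_ioremap_resource(): only the latter calls devm_request_mem_region() and therefore rejects overlapping register windows. A simplified sketch of the non-claiming mapping path, with the hypothetical helper foo_lpcg_map() standing in for the driver's probe code:

/* Sketch only; devm_ioremap() maps without claiming the memory region,
 * so several LPCG instances with overlapping windows can all probe. */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *foo_lpcg_map(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* Map the registers without reserving the region. */
	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}
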
index a03bbed..de93ce7 100644 (file)
@@ -166,7 +166,7 @@ static const struct clk_ops clk_pfdv2_ops = {
        .is_enabled     = clk_pfdv2_is_enabled,
 };
 
-struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
                             void __iomem *reg, u8 idx)
 {
        struct clk_init_data init;
index 3636c80..5b0519a 100644 (file)
@@ -67,6 +67,13 @@ struct imx_pll14xx_clk imx_1443x_pll = {
        .rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
 };
 
+struct imx_pll14xx_clk imx_1443x_dram_pll = {
+       .type = PLL_1443X,
+       .rate_table = imx_pll1443x_tbl,
+       .rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
+       .flags = CLK_GET_RATE_NOCACHE,
+};
+
 struct imx_pll14xx_clk imx_1416x_pll = {
        .type = PLL_1416X,
        .rate_table = imx_pll1416x_tbl,
@@ -369,13 +376,14 @@ static const struct clk_ops clk_pll1443x_ops = {
        .set_rate       = clk_pll1443x_set_rate,
 };
 
-struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
-                           void __iomem *base,
-                           const struct imx_pll14xx_clk *pll_clk)
+struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+                                 void __iomem *base,
+                                 const struct imx_pll14xx_clk *pll_clk)
 {
        struct clk_pll14xx *pll;
-       struct clk *clk;
+       struct clk_hw *hw;
        struct clk_init_data init;
+       int ret;
        u32 val;
 
        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
@@ -412,12 +420,15 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
        val &= ~BYPASS_MASK;
        writel_relaxed(val, pll->base + GNRL_CTL);
 
-       clk = clk_register(NULL, &pll->hw);
-       if (IS_ERR(clk)) {
-               pr_err("%s: failed to register pll %s %lu\n",
-                       __func__, name, PTR_ERR(clk));
+       hw = &pll->hw;
+
+       ret = clk_hw_register(NULL, hw);
+       if (ret) {
+               pr_err("%s: failed to register pll %s %d\n",
+                       __func__, name, ret);
                kfree(pll);
+               return ERR_PTR(ret);
        }
 
-       return clk;
+       return hw;
 }
index 4ba9973..de4f8a4 100644 (file)
@@ -111,12 +111,13 @@ static const struct clk_ops clk_pllv1_ops = {
        .recalc_rate = clk_pllv1_recalc_rate,
 };
 
-struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
+struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
                const char *parent, void __iomem *base)
 {
        struct clk_pllv1 *pll;
-       struct clk *clk;
+       struct clk_hw *hw;
        struct clk_init_data init;
+       int ret;
 
        pll = kmalloc(sizeof(*pll), GFP_KERNEL);
        if (!pll)
@@ -132,10 +133,13 @@ struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
        init.num_parents = 1;
 
        pll->hw.init = &init;
+       hw = &pll->hw;
 
-       clk = clk_register(NULL, &pll->hw);
-       if (IS_ERR(clk))
+       ret = clk_hw_register(NULL, hw);
+       if (ret) {
                kfree(pll);
+               return ERR_PTR(ret);
+       }
 
-       return clk;
+       return hw;
 }
index eeba3cb..ff17f06 100644 (file)
@@ -239,12 +239,13 @@ static const struct clk_ops clk_pllv2_ops = {
        .set_rate = clk_pllv2_set_rate,
 };
 
-struct clk *imx_clk_pllv2(const char *name, const char *parent,
+struct clk_hw *imx_clk_hw_pllv2(const char *name, const char *parent,
                void __iomem *base)
 {
        struct clk_pllv2 *pll;
-       struct clk *clk;
+       struct clk_hw *hw;
        struct clk_init_data init;
+       int ret;
 
        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
        if (!pll)
@@ -259,10 +260,13 @@ struct clk *imx_clk_pllv2(const char *name, const char *parent,
        init.num_parents = 1;
 
        pll->hw.init = &init;
+       hw = &pll->hw;
 
-       clk = clk_register(NULL, &pll->hw);
-       if (IS_ERR(clk))
+       ret = clk_hw_register(NULL, hw);
+       if (ret) {
                kfree(pll);
+               return ERR_PTR(ret);
+       }
 
-       return clk;
+       return hw;
 }
index 8155b12..f51a800 100644 (file)
@@ -206,7 +206,7 @@ static const struct clk_ops clk_pllv4_ops = {
        .is_enabled     = clk_pllv4_is_enabled,
 };
 
-struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
                          void __iomem *base)
 {
        struct clk_pllv4 *pll;
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
deleted file mode 100644 (file)
index 5d65f65..0000000
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Copyright 2018 NXP.
- *
- * This driver supports the SCCG plls found in the imx8m SOCs
- *
- * Documentation for this SCCG pll can be found at:
- *   https://www.nxp.com/docs/en/reference-manual/IMX8MDQLQRM.pdf#page=834
- */
-
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/slab.h>
-#include <linux/bitfield.h>
-
-#include "clk.h"
-
-/* PLL CFGs */
-#define PLL_CFG0               0x0
-#define PLL_CFG1               0x4
-#define PLL_CFG2               0x8
-
-#define PLL_DIVF1_MASK         GENMASK(18, 13)
-#define PLL_DIVF2_MASK         GENMASK(12, 7)
-#define PLL_DIVR1_MASK         GENMASK(27, 25)
-#define PLL_DIVR2_MASK         GENMASK(24, 19)
-#define PLL_DIVQ_MASK           GENMASK(6, 1)
-#define PLL_REF_MASK           GENMASK(2, 0)
-
-#define PLL_LOCK_MASK          BIT(31)
-#define PLL_PD_MASK            BIT(7)
-
-/* These are the specification limits for the SSCG PLL */
-#define PLL_REF_MIN_FREQ               25000000UL
-#define PLL_REF_MAX_FREQ               235000000UL
-
-#define PLL_STAGE1_MIN_FREQ            1600000000UL
-#define PLL_STAGE1_MAX_FREQ            2400000000UL
-
-#define PLL_STAGE1_REF_MIN_FREQ                25000000UL
-#define PLL_STAGE1_REF_MAX_FREQ                54000000UL
-
-#define PLL_STAGE2_MIN_FREQ            1200000000UL
-#define PLL_STAGE2_MAX_FREQ            2400000000UL
-
-#define PLL_STAGE2_REF_MIN_FREQ                54000000UL
-#define PLL_STAGE2_REF_MAX_FREQ                75000000UL
-
-#define PLL_OUT_MIN_FREQ               20000000UL
-#define PLL_OUT_MAX_FREQ               1200000000UL
-
-#define PLL_DIVR1_MAX                  7
-#define PLL_DIVR2_MAX                  63
-#define PLL_DIVF1_MAX                  63
-#define PLL_DIVF2_MAX                  63
-#define PLL_DIVQ_MAX                   63
-
-#define PLL_BYPASS_NONE                        0x0
-#define PLL_BYPASS1                    0x2
-#define PLL_BYPASS2                    0x1
-
-#define SSCG_PLL_BYPASS1_MASK           BIT(5)
-#define SSCG_PLL_BYPASS2_MASK           BIT(4)
-#define SSCG_PLL_BYPASS_MASK           GENMASK(5, 4)
-
-#define PLL_SCCG_LOCK_TIMEOUT          70
-
-struct clk_sccg_pll_setup {
-       int divr1, divf1;
-       int divr2, divf2;
-       int divq;
-       int bypass;
-
-       uint64_t vco1;
-       uint64_t vco2;
-       uint64_t fout;
-       uint64_t ref;
-       uint64_t ref_div1;
-       uint64_t ref_div2;
-       uint64_t fout_request;
-       int fout_error;
-};
-
-struct clk_sccg_pll {
-       struct clk_hw   hw;
-       const struct clk_ops  ops;
-
-       void __iomem *base;
-
-       struct clk_sccg_pll_setup setup;
-
-       u8 parent;
-       u8 bypass1;
-       u8 bypass2;
-};
-
-#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw)
-
-static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll)
-{
-       u32 val;
-
-       val = readl_relaxed(pll->base + PLL_CFG0);
-
-       /* don't wait for lock if all plls are bypassed */
-       if (!(val & SSCG_PLL_BYPASS2_MASK))
-               return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK,
-                                               0, PLL_SCCG_LOCK_TIMEOUT);
-
-       return 0;
-}
-
-static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup,
-                                       struct clk_sccg_pll_setup *temp_setup)
-{
-       int new_diff = temp_setup->fout - temp_setup->fout_request;
-       int diff = temp_setup->fout_error;
-
-       if (abs(diff) > abs(new_diff)) {
-               temp_setup->fout_error = new_diff;
-               memcpy(setup, temp_setup, sizeof(struct clk_sccg_pll_setup));
-
-               if (temp_setup->fout_request == temp_setup->fout)
-                       return 0;
-       }
-       return -1;
-}
-
-static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
-                               struct clk_sccg_pll_setup *temp_setup)
-{
-       int ret = -EINVAL;
-
-       for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
-            temp_setup->divq++) {
-               temp_setup->vco2 = temp_setup->vco1;
-               do_div(temp_setup->vco2, temp_setup->divr2 + 1);
-               temp_setup->vco2 *= 2;
-               temp_setup->vco2 *= temp_setup->divf2 + 1;
-               if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
-                               temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
-                       temp_setup->fout = temp_setup->vco2;
-                       do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
-
-                       ret = clk_sccg_pll2_check_match(setup, temp_setup);
-                       if (!ret) {
-                               temp_setup->bypass = PLL_BYPASS1;
-                               return ret;
-                       }
-               }
-       }
-
-       return ret;
-}
-
-static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup,
-                                       struct clk_sccg_pll_setup *temp_setup)
-{
-       int ret = -EINVAL;
-
-       for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
-            temp_setup->divf2++) {
-               ret = clk_sccg_divq_lookup(setup, temp_setup);
-               if (!ret)
-                       return ret;
-       }
-
-       return ret;
-}
-
-static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
-                               struct clk_sccg_pll_setup *temp_setup)
-{
-       int ret = -EINVAL;
-
-       for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
-            temp_setup->divr2++) {
-               temp_setup->ref_div2 = temp_setup->vco1;
-               do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
-               if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
-                   temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
-                       ret = clk_sccg_divf2_lookup(setup, temp_setup);
-                       if (!ret)
-                               return ret;
-               }
-       }
-
-       return ret;
-}
-
-static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup,
-                                       struct clk_sccg_pll_setup *temp_setup,
-                                       uint64_t ref)
-{
-
-       int ret = -EINVAL;
-
-       if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
-               return ret;
-
-       temp_setup->vco1 = ref;
-
-       ret = clk_sccg_divr2_lookup(setup, temp_setup);
-       return ret;
-}
-
-static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
-                               struct clk_sccg_pll_setup *temp_setup)
-{
-       int ret = -EINVAL;
-
-       for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
-            temp_setup->divf1++) {
-               uint64_t vco1 = temp_setup->ref;
-
-               do_div(vco1, temp_setup->divr1 + 1);
-               vco1 *= 2;
-               vco1 *= temp_setup->divf1 + 1;
-
-               ret = clk_sccg_pll2_find_setup(setup, temp_setup, vco1);
-               if (!ret) {
-                       temp_setup->bypass = PLL_BYPASS_NONE;
-                       return ret;
-               }
-       }
-
-       return ret;
-}
-
-static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
-                               struct clk_sccg_pll_setup *temp_setup)
-{
-       int ret = -EINVAL;
-
-       for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
-            temp_setup->divr1++) {
-               temp_setup->ref_div1 = temp_setup->ref;
-               do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
-               if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
-                   temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
-                       ret = clk_sccg_divf1_lookup(setup, temp_setup);
-                       if (!ret)
-                               return ret;
-               }
-       }
-
-       return ret;
-}
-
-static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup,
-                                       struct clk_sccg_pll_setup *temp_setup,
-                                       uint64_t ref)
-{
-
-       int ret = -EINVAL;
-
-       if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
-               return ret;
-
-       temp_setup->ref = ref;
-
-       ret = clk_sccg_divr1_lookup(setup, temp_setup);
-
-       return ret;
-}
-
-static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
-                                       uint64_t prate,
-                                       uint64_t rate, int try_bypass)
-{
-       struct clk_sccg_pll_setup temp_setup;
-       int ret = -EINVAL;
-
-       memset(&temp_setup, 0, sizeof(struct clk_sccg_pll_setup));
-       memset(setup, 0, sizeof(struct clk_sccg_pll_setup));
-
-       temp_setup.fout_error = PLL_OUT_MAX_FREQ;
-       temp_setup.fout_request = rate;
-
-       switch (try_bypass) {
-
-       case PLL_BYPASS2:
-               if (prate == rate) {
-                       setup->bypass = PLL_BYPASS2;
-                       setup->fout = rate;
-                       ret = 0;
-               }
-               break;
-
-       case PLL_BYPASS1:
-               ret = clk_sccg_pll2_find_setup(setup, &temp_setup, prate);
-               break;
-
-       case PLL_BYPASS_NONE:
-               ret = clk_sccg_pll1_find_setup(setup, &temp_setup, prate);
-               break;
-       }
-
-       return ret;
-}
-
-
-static int clk_sccg_pll_is_prepared(struct clk_hw *hw)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-
-       u32 val = readl_relaxed(pll->base + PLL_CFG0);
-
-       return (val & PLL_PD_MASK) ? 0 : 1;
-}
-
-static int clk_sccg_pll_prepare(struct clk_hw *hw)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       u32 val;
-
-       val = readl_relaxed(pll->base + PLL_CFG0);
-       val &= ~PLL_PD_MASK;
-       writel_relaxed(val, pll->base + PLL_CFG0);
-
-       return clk_sccg_pll_wait_lock(pll);
-}
-
-static void clk_sccg_pll_unprepare(struct clk_hw *hw)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       u32 val;
-
-       val = readl_relaxed(pll->base + PLL_CFG0);
-       val |= PLL_PD_MASK;
-       writel_relaxed(val, pll->base + PLL_CFG0);
-}
-
-static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
-                                        unsigned long parent_rate)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       u32 val, divr1, divf1, divr2, divf2, divq;
-       u64 temp64;
-
-       val = readl_relaxed(pll->base + PLL_CFG2);
-       divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
-       divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
-       divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
-       divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
-       divq = FIELD_GET(PLL_DIVQ_MASK, val);
-
-       temp64 = parent_rate;
-
-       val = readl(pll->base + PLL_CFG0);
-       if (val & SSCG_PLL_BYPASS2_MASK) {
-               temp64 = parent_rate;
-       } else if (val & SSCG_PLL_BYPASS1_MASK) {
-               temp64 *= divf2;
-               do_div(temp64, (divr2 + 1) * (divq + 1));
-       } else {
-               temp64 *= 2;
-               temp64 *= (divf1 + 1) * (divf2 + 1);
-               do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
-       }
-
-       return temp64;
-}
-
-static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-                           unsigned long parent_rate)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       struct clk_sccg_pll_setup *setup = &pll->setup;
-       u32 val;
-
-       /* set bypass here too since the parent might be the same */
-       val = readl(pll->base + PLL_CFG0);
-       val &= ~SSCG_PLL_BYPASS_MASK;
-       val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
-       writel(val, pll->base + PLL_CFG0);
-
-       val = readl_relaxed(pll->base + PLL_CFG2);
-       val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
-       val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
-       val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
-       val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
-       val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
-       val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
-       val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
-       writel_relaxed(val, pll->base + PLL_CFG2);
-
-       return clk_sccg_pll_wait_lock(pll);
-}
-
-static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       u32 val;
-       u8 ret = pll->parent;
-
-       val = readl(pll->base + PLL_CFG0);
-       if (val & SSCG_PLL_BYPASS2_MASK)
-               ret = pll->bypass2;
-       else if (val & SSCG_PLL_BYPASS1_MASK)
-               ret = pll->bypass1;
-       return ret;
-}
-
-static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       u32 val;
-
-       val = readl(pll->base + PLL_CFG0);
-       val &= ~SSCG_PLL_BYPASS_MASK;
-       val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
-       writel(val, pll->base + PLL_CFG0);
-
-       return clk_sccg_pll_wait_lock(pll);
-}
-
-static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
-                                       struct clk_rate_request *req,
-                                       uint64_t min,
-                                       uint64_t max,
-                                       uint64_t rate,
-                                       int bypass)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       struct clk_sccg_pll_setup *setup = &pll->setup;
-       struct clk_hw *parent_hw = NULL;
-       int bypass_parent_index;
-       int ret = -EINVAL;
-
-       req->max_rate = max;
-       req->min_rate = min;
-
-       switch (bypass) {
-       case PLL_BYPASS2:
-               bypass_parent_index = pll->bypass2;
-               break;
-       case PLL_BYPASS1:
-               bypass_parent_index = pll->bypass1;
-               break;
-       default:
-               bypass_parent_index = pll->parent;
-               break;
-       }
-
-       parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
-       ret = __clk_determine_rate(parent_hw, req);
-       if (!ret) {
-               ret = clk_sccg_pll_find_setup(setup, req->rate,
-                                               rate, bypass);
-       }
-
-       req->best_parent_hw = parent_hw;
-       req->best_parent_rate = req->rate;
-       req->rate = setup->fout;
-
-       return ret;
-}
-
-static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
-                                      struct clk_rate_request *req)
-{
-       struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-       struct clk_sccg_pll_setup *setup = &pll->setup;
-       uint64_t rate = req->rate;
-       uint64_t min = req->min_rate;
-       uint64_t max = req->max_rate;
-       int ret = -EINVAL;
-
-       if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
-               return ret;
-
-       ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate,
-                                               rate, PLL_BYPASS2);
-       if (!ret)
-               return ret;
-
-       ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
-                                               PLL_STAGE1_REF_MAX_FREQ, rate,
-                                               PLL_BYPASS1);
-       if (!ret)
-               return ret;
-
-       ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
-                                               PLL_REF_MAX_FREQ, rate,
-                                               PLL_BYPASS_NONE);
-       if (!ret)
-               return ret;
-
-       if (setup->fout >= min && setup->fout <= max)
-               ret = 0;
-
-       return ret;
-}
-
-static const struct clk_ops clk_sccg_pll_ops = {
-       .prepare        = clk_sccg_pll_prepare,
-       .unprepare      = clk_sccg_pll_unprepare,
-       .is_prepared    = clk_sccg_pll_is_prepared,
-       .recalc_rate    = clk_sccg_pll_recalc_rate,
-       .set_rate       = clk_sccg_pll_set_rate,
-       .set_parent     = clk_sccg_pll_set_parent,
-       .get_parent     = clk_sccg_pll_get_parent,
-       .determine_rate = clk_sccg_pll_determine_rate,
-};
-
-struct clk *imx_clk_sccg_pll(const char *name,
-                               const char * const *parent_names,
-                               u8 num_parents,
-                               u8 parent, u8 bypass1, u8 bypass2,
-                               void __iomem *base,
-                               unsigned long flags)
-{
-       struct clk_sccg_pll *pll;
-       struct clk_init_data init;
-       struct clk_hw *hw;
-       int ret;
-
-       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-       if (!pll)
-               return ERR_PTR(-ENOMEM);
-
-       pll->parent = parent;
-       pll->bypass1 = bypass1;
-       pll->bypass2 = bypass2;
-
-       pll->base = base;
-       init.name = name;
-       init.ops = &clk_sccg_pll_ops;
-
-       init.flags = flags;
-       init.parent_names = parent_names;
-       init.num_parents = num_parents;
-
-       pll->base = base;
-       pll->hw.init = &init;
-
-       hw = &pll->hw;
-
-       ret = clk_hw_register(NULL, hw);
-       if (ret) {
-               kfree(pll);
-               return ERR_PTR(ret);
-       }
-
-       return hw->clk;
-}
diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
new file mode 100644 (file)
index 0000000..acd1b90
--- /dev/null
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2018 NXP.
+ *
+ * This driver supports the SSCG PLLs found in the i.MX8M SoCs
+ *
+ * Documentation for this SSCG PLL can be found at:
+ *   https://www.nxp.com/docs/en/reference-manual/IMX8MDQLQRM.pdf#page=834
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+#include "clk.h"
+
+/* PLL CFGs */
+#define PLL_CFG0               0x0
+#define PLL_CFG1               0x4
+#define PLL_CFG2               0x8
+
+#define PLL_DIVF1_MASK         GENMASK(18, 13)
+#define PLL_DIVF2_MASK         GENMASK(12, 7)
+#define PLL_DIVR1_MASK         GENMASK(27, 25)
+#define PLL_DIVR2_MASK         GENMASK(24, 19)
+#define PLL_DIVQ_MASK           GENMASK(6, 1)
+#define PLL_REF_MASK           GENMASK(2, 0)
+
+#define PLL_LOCK_MASK          BIT(31)
+#define PLL_PD_MASK            BIT(7)
+
+/* These are the specification limits for the SSCG PLL */
+#define PLL_REF_MIN_FREQ               25000000UL
+#define PLL_REF_MAX_FREQ               235000000UL
+
+#define PLL_STAGE1_MIN_FREQ            1600000000UL
+#define PLL_STAGE1_MAX_FREQ            2400000000UL
+
+#define PLL_STAGE1_REF_MIN_FREQ                25000000UL
+#define PLL_STAGE1_REF_MAX_FREQ                54000000UL
+
+#define PLL_STAGE2_MIN_FREQ            1200000000UL
+#define PLL_STAGE2_MAX_FREQ            2400000000UL
+
+#define PLL_STAGE2_REF_MIN_FREQ                54000000UL
+#define PLL_STAGE2_REF_MAX_FREQ                75000000UL
+
+#define PLL_OUT_MIN_FREQ               20000000UL
+#define PLL_OUT_MAX_FREQ               1200000000UL
+
+#define PLL_DIVR1_MAX                  7
+#define PLL_DIVR2_MAX                  63
+#define PLL_DIVF1_MAX                  63
+#define PLL_DIVF2_MAX                  63
+#define PLL_DIVQ_MAX                   63
+
+#define PLL_BYPASS_NONE                        0x0
+#define PLL_BYPASS1                    0x2
+#define PLL_BYPASS2                    0x1
+
+#define SSCG_PLL_BYPASS1_MASK           BIT(5)
+#define SSCG_PLL_BYPASS2_MASK           BIT(4)
+#define SSCG_PLL_BYPASS_MASK           GENMASK(5, 4)
+
+#define PLL_SCCG_LOCK_TIMEOUT          70
+
+struct clk_sscg_pll_setup {
+       int divr1, divf1;
+       int divr2, divf2;
+       int divq;
+       int bypass;
+
+       uint64_t vco1;
+       uint64_t vco2;
+       uint64_t fout;
+       uint64_t ref;
+       uint64_t ref_div1;
+       uint64_t ref_div2;
+       uint64_t fout_request;
+       int fout_error;
+};
+
+struct clk_sscg_pll {
+       struct clk_hw   hw;
+       const struct clk_ops  ops;
+
+       void __iomem *base;
+
+       struct clk_sscg_pll_setup setup;
+
+       u8 parent;
+       u8 bypass1;
+       u8 bypass2;
+};
+
+#define to_clk_sscg_pll(_hw) container_of(_hw, struct clk_sscg_pll, hw)
+
+static int clk_sscg_pll_wait_lock(struct clk_sscg_pll *pll)
+{
+       u32 val;
+
+       val = readl_relaxed(pll->base + PLL_CFG0);
+
+       /* don't wait for lock if all plls are bypassed */
+       if (!(val & SSCG_PLL_BYPASS2_MASK))
+               return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK,
+                                               0, PLL_SCCG_LOCK_TIMEOUT);
+
+       return 0;
+}
+
+static int clk_sscg_pll2_check_match(struct clk_sscg_pll_setup *setup,
+                                       struct clk_sscg_pll_setup *temp_setup)
+{
+       int new_diff = temp_setup->fout - temp_setup->fout_request;
+       int diff = temp_setup->fout_error;
+
+       if (abs(diff) > abs(new_diff)) {
+               temp_setup->fout_error = new_diff;
+               memcpy(setup, temp_setup, sizeof(struct clk_sscg_pll_setup));
+
+               if (temp_setup->fout_request == temp_setup->fout)
+                       return 0;
+       }
+       return -1;
+}
+
+static int clk_sscg_divq_lookup(struct clk_sscg_pll_setup *setup,
+                               struct clk_sscg_pll_setup *temp_setup)
+{
+       int ret = -EINVAL;
+
+       for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
+            temp_setup->divq++) {
+               temp_setup->vco2 = temp_setup->vco1;
+               do_div(temp_setup->vco2, temp_setup->divr2 + 1);
+               temp_setup->vco2 *= 2;
+               temp_setup->vco2 *= temp_setup->divf2 + 1;
+               if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
+                               temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
+                       temp_setup->fout = temp_setup->vco2;
+                       do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
+
+                       ret = clk_sscg_pll2_check_match(setup, temp_setup);
+                       if (!ret) {
+                               temp_setup->bypass = PLL_BYPASS1;
+                               return ret;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static int clk_sscg_divf2_lookup(struct clk_sscg_pll_setup *setup,
+                                       struct clk_sscg_pll_setup *temp_setup)
+{
+       int ret = -EINVAL;
+
+       for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
+            temp_setup->divf2++) {
+               ret = clk_sscg_divq_lookup(setup, temp_setup);
+               if (!ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+static int clk_sscg_divr2_lookup(struct clk_sscg_pll_setup *setup,
+                               struct clk_sscg_pll_setup *temp_setup)
+{
+       int ret = -EINVAL;
+
+       for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
+            temp_setup->divr2++) {
+               temp_setup->ref_div2 = temp_setup->vco1;
+               do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
+               if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
+                   temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
+                       ret = clk_sscg_divf2_lookup(setup, temp_setup);
+                       if (!ret)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
+                                       struct clk_sscg_pll_setup *temp_setup,
+                                       uint64_t ref)
+{
+
+       int ret = -EINVAL;
+
+       if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
+               return ret;
+
+       temp_setup->vco1 = ref;
+
+       ret = clk_sscg_divr2_lookup(setup, temp_setup);
+       return ret;
+}
+
+static int clk_sscg_divf1_lookup(struct clk_sscg_pll_setup *setup,
+                               struct clk_sscg_pll_setup *temp_setup)
+{
+       int ret = -EINVAL;
+
+       for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
+            temp_setup->divf1++) {
+               uint64_t vco1 = temp_setup->ref;
+
+               do_div(vco1, temp_setup->divr1 + 1);
+               vco1 *= 2;
+               vco1 *= temp_setup->divf1 + 1;
+
+               ret = clk_sscg_pll2_find_setup(setup, temp_setup, vco1);
+               if (!ret) {
+                       temp_setup->bypass = PLL_BYPASS_NONE;
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int clk_sscg_divr1_lookup(struct clk_sscg_pll_setup *setup,
+                               struct clk_sscg_pll_setup *temp_setup)
+{
+       int ret = -EINVAL;
+
+       for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
+            temp_setup->divr1++) {
+               temp_setup->ref_div1 = temp_setup->ref;
+               do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
+               if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
+                   temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
+                       ret = clk_sscg_divf1_lookup(setup, temp_setup);
+                       if (!ret)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
+                                       struct clk_sscg_pll_setup *temp_setup,
+                                       uint64_t ref)
+{
+
+       int ret = -EINVAL;
+
+       if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
+               return ret;
+
+       temp_setup->ref = ref;
+
+       ret = clk_sscg_divr1_lookup(setup, temp_setup);
+
+       return ret;
+}
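
The nested divider lookups above walk the two PLL stages outwards from the reference
input. Summarizing the code above (a restatement of these functions, not taken from the
reference manual), a candidate configuration must satisfy:

    ref_div1 = ref / (divr1 + 1)              /* 25 MHz  .. 54 MHz  */
    vco1     = 2 * ref_div1 * (divf1 + 1)     /* 1.6 GHz .. 2.4 GHz */
    ref_div2 = vco1 / (divr2 + 1)             /* 54 MHz  .. 75 MHz  */
    vco2     = 2 * ref_div2 * (divf2 + 1)     /* 1.2 GHz .. 2.4 GHz */
    fout     = vco2 / (2 * (divq + 1))        /* 20 MHz  .. 1.2 GHz */
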
+
+static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
+                                       uint64_t prate,
+                                       uint64_t rate, int try_bypass)
+{
+       struct clk_sscg_pll_setup temp_setup;
+       int ret = -EINVAL;
+
+       memset(&temp_setup, 0, sizeof(struct clk_sscg_pll_setup));
+       memset(setup, 0, sizeof(struct clk_sscg_pll_setup));
+
+       temp_setup.fout_error = PLL_OUT_MAX_FREQ;
+       temp_setup.fout_request = rate;
+
+       switch (try_bypass) {
+
+       case PLL_BYPASS2:
+               if (prate == rate) {
+                       setup->bypass = PLL_BYPASS2;
+                       setup->fout = rate;
+                       ret = 0;
+               }
+               break;
+
+       case PLL_BYPASS1:
+               ret = clk_sscg_pll2_find_setup(setup, &temp_setup, prate);
+               break;
+
+       case PLL_BYPASS_NONE:
+               ret = clk_sscg_pll1_find_setup(setup, &temp_setup, prate);
+               break;
+       }
+
+       return ret;
+}
+
+
+static int clk_sscg_pll_is_prepared(struct clk_hw *hw)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+
+       u32 val = readl_relaxed(pll->base + PLL_CFG0);
+
+       return (val & PLL_PD_MASK) ? 0 : 1;
+}
+
+static int clk_sscg_pll_prepare(struct clk_hw *hw)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       u32 val;
+
+       val = readl_relaxed(pll->base + PLL_CFG0);
+       val &= ~PLL_PD_MASK;
+       writel_relaxed(val, pll->base + PLL_CFG0);
+
+       return clk_sscg_pll_wait_lock(pll);
+}
+
+static void clk_sscg_pll_unprepare(struct clk_hw *hw)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       u32 val;
+
+       val = readl_relaxed(pll->base + PLL_CFG0);
+       val |= PLL_PD_MASK;
+       writel_relaxed(val, pll->base + PLL_CFG0);
+}
+
+static unsigned long clk_sscg_pll_recalc_rate(struct clk_hw *hw,
+                                        unsigned long parent_rate)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       u32 val, divr1, divf1, divr2, divf2, divq;
+       u64 temp64;
+
+       val = readl_relaxed(pll->base + PLL_CFG2);
+       divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
+       divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
+       divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
+       divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
+       divq = FIELD_GET(PLL_DIVQ_MASK, val);
+
+       temp64 = parent_rate;
+
+       val = readl(pll->base + PLL_CFG0);
+       if (val & SSCG_PLL_BYPASS2_MASK) {
+               temp64 = parent_rate;
+       } else if (val & SSCG_PLL_BYPASS1_MASK) {
+               temp64 *= divf2;
+               do_div(temp64, (divr2 + 1) * (divq + 1));
+       } else {
+               temp64 *= 2;
+               temp64 *= (divf1 + 1) * (divf2 + 1);
+               do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
+       }
+
+       return temp64;
+}
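
As a quick sanity check of the non-bypass branch above, a hypothetical register setting
(divider values picked purely for illustration, not read from real hardware) works out to:

    parent_rate = 25 MHz, divr1 = 0, divf1 = 35, divr2 = 29, divf2 = 9, divq = 0

    rate = 25 MHz * 2 * (35 + 1) * (9 + 1) / ((0 + 1) * (29 + 1) * (0 + 1))
         = 25 MHz * 720 / 30
         = 600 MHz

which also respects the stage limits defined at the top of the file.
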
+
+static int clk_sscg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                           unsigned long parent_rate)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       struct clk_sscg_pll_setup *setup = &pll->setup;
+       u32 val;
+
+       /* set bypass here too since the parent might be the same */
+       val = readl(pll->base + PLL_CFG0);
+       val &= ~SSCG_PLL_BYPASS_MASK;
+       val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
+       writel(val, pll->base + PLL_CFG0);
+
+       val = readl_relaxed(pll->base + PLL_CFG2);
+       val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
+       val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
+       val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
+       val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
+       val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
+       val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
+       val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
+       writel_relaxed(val, pll->base + PLL_CFG2);
+
+       return clk_sscg_pll_wait_lock(pll);
+}
+
+static u8 clk_sscg_pll_get_parent(struct clk_hw *hw)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       u32 val;
+       u8 ret = pll->parent;
+
+       val = readl(pll->base + PLL_CFG0);
+       if (val & SSCG_PLL_BYPASS2_MASK)
+               ret = pll->bypass2;
+       else if (val & SSCG_PLL_BYPASS1_MASK)
+               ret = pll->bypass1;
+       return ret;
+}
+
+static int clk_sscg_pll_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       u32 val;
+
+       val = readl(pll->base + PLL_CFG0);
+       val &= ~SSCG_PLL_BYPASS_MASK;
+       val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
+       writel(val, pll->base + PLL_CFG0);
+
+       return clk_sscg_pll_wait_lock(pll);
+}
+
+static int __clk_sscg_pll_determine_rate(struct clk_hw *hw,
+                                       struct clk_rate_request *req,
+                                       uint64_t min,
+                                       uint64_t max,
+                                       uint64_t rate,
+                                       int bypass)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       struct clk_sscg_pll_setup *setup = &pll->setup;
+       struct clk_hw *parent_hw = NULL;
+       int bypass_parent_index;
+       int ret = -EINVAL;
+
+       req->max_rate = max;
+       req->min_rate = min;
+
+       switch (bypass) {
+       case PLL_BYPASS2:
+               bypass_parent_index = pll->bypass2;
+               break;
+       case PLL_BYPASS1:
+               bypass_parent_index = pll->bypass1;
+               break;
+       default:
+               bypass_parent_index = pll->parent;
+               break;
+       }
+
+       parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
+       ret = __clk_determine_rate(parent_hw, req);
+       if (!ret) {
+               ret = clk_sscg_pll_find_setup(setup, req->rate,
+                                               rate, bypass);
+       }
+
+       req->best_parent_hw = parent_hw;
+       req->best_parent_rate = req->rate;
+       req->rate = setup->fout;
+
+       return ret;
+}
+
+static int clk_sscg_pll_determine_rate(struct clk_hw *hw,
+                                      struct clk_rate_request *req)
+{
+       struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+       struct clk_sscg_pll_setup *setup = &pll->setup;
+       uint64_t rate = req->rate;
+       uint64_t min = req->min_rate;
+       uint64_t max = req->max_rate;
+       int ret = -EINVAL;
+
+       if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
+               return ret;
+
+       ret = __clk_sscg_pll_determine_rate(hw, req, req->rate, req->rate,
+                                               rate, PLL_BYPASS2);
+       if (!ret)
+               return ret;
+
+       ret = __clk_sscg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
+                                               PLL_STAGE1_REF_MAX_FREQ, rate,
+                                               PLL_BYPASS1);
+       if (!ret)
+               return ret;
+
+       ret = __clk_sscg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
+                                               PLL_REF_MAX_FREQ, rate,
+                                               PLL_BYPASS_NONE);
+       if (!ret)
+               return ret;
+
+       if (setup->fout >= min && setup->fout <= max)
+               ret = 0;
+
+       return ret;
+}
+
+static const struct clk_ops clk_sscg_pll_ops = {
+       .prepare        = clk_sscg_pll_prepare,
+       .unprepare      = clk_sscg_pll_unprepare,
+       .is_prepared    = clk_sscg_pll_is_prepared,
+       .recalc_rate    = clk_sscg_pll_recalc_rate,
+       .set_rate       = clk_sscg_pll_set_rate,
+       .set_parent     = clk_sscg_pll_set_parent,
+       .get_parent     = clk_sscg_pll_get_parent,
+       .determine_rate = clk_sscg_pll_determine_rate,
+};
+
+struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
+                               const char * const *parent_names,
+                               u8 num_parents,
+                               u8 parent, u8 bypass1, u8 bypass2,
+                               void __iomem *base,
+                               unsigned long flags)
+{
+       struct clk_sscg_pll *pll;
+       struct clk_init_data init;
+       struct clk_hw *hw;
+       int ret;
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       pll->parent = parent;
+       pll->bypass1 = bypass1;
+       pll->bypass2 = bypass2;
+
+       pll->base = base;
+       init.name = name;
+       init.ops = &clk_sscg_pll_ops;
+
+       init.flags = flags;
+       init.parent_names = parent_names;
+       init.num_parents = num_parents;
+
+       pll->base = base;
+       pll->hw.init = &init;
+
+       hw = &pll->hw;
+
+       ret = clk_hw_register(NULL, hw);
+       if (ret) {
+               kfree(pll);
+               return ERR_PTR(ret);
+       }
+
+       return hw;
+}
index cfc05e4..87ab8db 100644 (file)
@@ -22,6 +22,14 @@ void imx_unregister_clocks(struct clk *clks[], unsigned int count)
                clk_unregister(clks[i]);
 }
 
+void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count)
+{
+       unsigned int i;
+
+       for (i = 0; i < count; i++)
+               clk_hw_unregister(hws[i]);
+}
+
 void __init imx_mmdc_mask_handshake(void __iomem *ccm_base,
                                    unsigned int chn)
 {
@@ -94,8 +102,8 @@ struct clk_hw * __init imx_obtain_fixed_clock_hw(
        return __clk_get_hw(clk);
 }
 
-struct clk_hw * __init imx_obtain_fixed_clk_hw(struct device_node *np,
-                                              const char *name)
+struct clk_hw * imx_obtain_fixed_clk_hw(struct device_node *np,
+                                       const char *name)
 {
        struct clk *clk;
 
index bc5bb6a..b05213b 100644 (file)
@@ -12,6 +12,7 @@ void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
 void imx_register_uart_clocks(struct clk ** const clks[]);
 void imx_mmdc_mask_handshake(void __iomem *ccm_base, unsigned int chn);
 void imx_unregister_clocks(struct clk *clks[], unsigned int count);
+void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count);
 
 extern void imx_cscmr1_fixup(u32 *val);
 
@@ -24,7 +25,7 @@ enum imx_pllv1_type {
        IMX_PLLV1_IMX35,
 };
 
-enum imx_sccg_pll_type {
+enum imx_sscg_pll_type {
        SCCG_PLL1,
        SCCG_PLL2,
 };
@@ -52,64 +53,98 @@ struct imx_pll14xx_clk {
 
 extern struct imx_pll14xx_clk imx_1416x_pll;
 extern struct imx_pll14xx_clk imx_1443x_pll;
+extern struct imx_pll14xx_clk imx_1443x_dram_pll;
 
 #define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
-       imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk
+       to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
 
 #define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
                                cgr_val, clk_gate_flags, lock, share_count) \
-       clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
-                               cgr_val, clk_gate_flags, lock, share_count)->clk
+       to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
+                               cgr_val, clk_gate_flags, lock, share_count))
 
 #define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
-       imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)->clk
+       to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))
 
 #define imx_clk_pfd(name, parent_name, reg, idx) \
-       imx_clk_hw_pfd(name, parent_name, reg, idx)->clk
+       to_clk(imx_clk_hw_pfd(name, parent_name, reg, idx))
 
 #define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \
-       imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk
+       to_clk(imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask))
+
+#define imx_clk_fixed(name, rate) \
+       to_clk(imx_clk_hw_fixed(name, rate))
 
 #define imx_clk_fixed_factor(name, parent, mult, div) \
-       imx_clk_hw_fixed_factor(name, parent, mult, div)->clk
+       to_clk(imx_clk_hw_fixed_factor(name, parent, mult, div))
+
+#define imx_clk_divider(name, parent, reg, shift, width) \
+       to_clk(imx_clk_hw_divider(name, parent, reg, shift, width))
 
 #define imx_clk_divider2(name, parent, reg, shift, width) \
-       imx_clk_hw_divider2(name, parent, reg, shift, width)->clk
+       to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width))
+
+#define imx_clk_divider_flags(name, parent, reg, shift, width, flags) \
+       to_clk(imx_clk_hw_divider_flags(name, parent, reg, shift, width, flags))
+
+#define imx_clk_gate(name, parent, reg, shift) \
+       to_clk(imx_clk_hw_gate(name, parent, reg, shift))
 
 #define imx_clk_gate_dis(name, parent, reg, shift) \
-       imx_clk_hw_gate_dis(name, parent, reg, shift)->clk
+       to_clk(imx_clk_hw_gate_dis(name, parent, reg, shift))
 
 #define imx_clk_gate2(name, parent, reg, shift) \
-       imx_clk_hw_gate2(name, parent, reg, shift)->clk
+       to_clk(imx_clk_hw_gate2(name, parent, reg, shift))
 
 #define imx_clk_gate2_flags(name, parent, reg, shift, flags) \
-       imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk
+       to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags))
 
 #define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \
-       imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk
+       to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count))
 
 #define imx_clk_gate3(name, parent, reg, shift) \
-       imx_clk_hw_gate3(name, parent, reg, shift)->clk
+       to_clk(imx_clk_hw_gate3(name, parent, reg, shift))
 
 #define imx_clk_gate4(name, parent, reg, shift) \
-       imx_clk_hw_gate4(name, parent, reg, shift)->clk
+       to_clk(imx_clk_hw_gate4(name, parent, reg, shift))
 
 #define imx_clk_mux(name, reg, shift, width, parents, num_parents) \
-       imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)->clk
+       to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents))
+
+#define imx_clk_pllv1(type, name, parent, base) \
+       to_clk(imx_clk_hw_pllv1(type, name, parent, base))
+
+#define imx_clk_pllv2(name, parent, base) \
+       to_clk(imx_clk_hw_pllv2(name, parent, base))
+
+#define imx_clk_frac_pll(name, parent_name, base) \
+       to_clk(imx_clk_hw_frac_pll(name, parent_name, base))
+
+#define imx_clk_sscg_pll(name, parent_names, num_parents, parent,\
+                               bypass1, bypass2, base, flags) \
+       to_clk(imx_clk_hw_sscg_pll(name, parent_names, num_parents, parent,\
+                               bypass1, bypass2, base, flags))
 
 struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
                 void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
 
-struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
+#define imx_clk_pll14xx(name, parent_name, base, pll_clk) \
+       to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk))
+
+struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+                                 void __iomem *base,
+                                 const struct imx_pll14xx_clk *pll_clk);
+
+struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
                const char *parent, void __iomem *base);
 
-struct clk *imx_clk_pllv2(const char *name, const char *parent,
+struct clk_hw *imx_clk_hw_pllv2(const char *name, const char *parent,
                void __iomem *base);
 
-struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_frac_pll(const char *name, const char *parent_name,
                             void __iomem *base);
 
-struct clk *imx_clk_sccg_pll(const char *name,
+struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
                                const char * const *parent_names,
                                u8 num_parents,
                                u8 parent, u8 bypass1, u8 bypass2,
@@ -149,7 +184,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
                .kdiv   =       (_k),                   \
        }
 
-struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
                             void __iomem *base);
 
 struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
@@ -173,7 +208,7 @@ struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent,
 struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name,
                void __iomem *reg, u8 idx);
 
-struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
                             void __iomem *reg, u8 idx);
 
 struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
@@ -184,7 +219,7 @@ struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift
                             u8 width, void __iomem *busy_reg, u8 busy_shift,
                             const char * const *parent_names, int num_parents);
 
-struct clk_hw *imx7ulp_clk_composite(const char *name,
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
                                     const char * const *parent_names,
                                     int num_parents, bool mux_present,
                                     bool rate_present, bool gate_present,
@@ -198,9 +233,11 @@ struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg,
                              u8 shift, u8 width, const char * const *parents,
                              int num_parents, void (*fixup)(u32 *val));
 
-static inline struct clk *imx_clk_fixed(const char *name, int rate)
+static inline struct clk *to_clk(struct clk_hw *hw)
 {
-       return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
+       if (IS_ERR_OR_NULL(hw))
+               return ERR_CAST(hw);
+       return hw->clk;
 }
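
The new to_clk() helper above is what lets the clk_hw based registration functions back
the legacy struct clk macros earlier in this header: it unwraps hw->clk on success and
passes error pointers through via ERR_CAST(). A minimal sketch of a caller (the clock
name and register offset are invented for illustration):

    /* hypothetical: register a gate through the hw API, hand out a struct clk */
    struct clk_hw *hw  = imx_clk_hw_gate2("uart1_ipg", "ipg", base + 0x70, 2);
    struct clk    *clk = to_clk(hw);    /* IS_ERR(clk) if registration failed */
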
 
 static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
@@ -224,13 +261,6 @@ static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
                        CLK_SET_RATE_PARENT, mult, div);
 }
 
-static inline struct clk *imx_clk_divider(const char *name, const char *parent,
-               void __iomem *reg, u8 shift, u8 width)
-{
-       return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
-                       reg, shift, width, 0, &imx_ccm_lock);
-}
-
 static inline struct clk_hw *imx_clk_hw_divider(const char *name,
                                                const char *parent,
                                                void __iomem *reg, u8 shift,
@@ -240,14 +270,6 @@ static inline struct clk_hw *imx_clk_hw_divider(const char *name,
                                       reg, shift, width, 0, &imx_ccm_lock);
 }
 
-static inline struct clk *imx_clk_divider_flags(const char *name,
-               const char *parent, void __iomem *reg, u8 shift, u8 width,
-               unsigned long flags)
-{
-       return clk_register_divider(NULL, name, parent, flags,
-                       reg, shift, width, 0, &imx_ccm_lock);
-}
-
 static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
                                                   const char *parent,
                                                   void __iomem *reg, u8 shift,
@@ -274,13 +296,6 @@ static inline struct clk *imx_clk_divider2_flags(const char *name,
                        reg, shift, width, 0, &imx_ccm_lock);
 }
 
-static inline struct clk *imx_clk_gate(const char *name, const char *parent,
-               void __iomem *reg, u8 shift)
-{
-       return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
-                       shift, 0, &imx_ccm_lock);
-}
-
 static inline struct clk_hw *imx_clk_hw_gate_flags(const char *name, const char *parent,
                void __iomem *reg, u8 shift, unsigned long flags)
 {
@@ -355,15 +370,18 @@ static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *pare
                        reg, shift, 0, &imx_ccm_lock);
 }
 
-static inline struct clk *imx_clk_gate3_flags(const char *name,
+static inline struct clk_hw *imx_clk_hw_gate3_flags(const char *name,
                const char *parent, void __iomem *reg, u8 shift,
                unsigned long flags)
 {
-       return clk_register_gate(NULL, name, parent,
+       return clk_hw_register_gate(NULL, name, parent,
                        flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
                        reg, shift, 0, &imx_ccm_lock);
 }
 
+#define imx_clk_gate3_flags(name, parent, reg, shift, flags) \
+       to_clk(imx_clk_hw_gate3_flags(name, parent, reg, shift, flags))
+
 static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *parent,
                void __iomem *reg, u8 shift)
 {
@@ -372,15 +390,18 @@ static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *pare
                        reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
 }
 
-static inline struct clk *imx_clk_gate4_flags(const char *name,
+static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
                const char *parent, void __iomem *reg, u8 shift,
                unsigned long flags)
 {
-       return clk_register_gate2(NULL, name, parent,
+       return clk_hw_register_gate2(NULL, name, parent,
                        flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
                        reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
 }
 
+#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
+       to_clk(imx_clk_hw_gate4_flags(name, parent, reg, shift, flags))
+
 static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg,
                        u8 shift, u8 width, const char * const *parents,
                        int num_parents)
@@ -420,6 +441,16 @@ static inline struct clk *imx_clk_mux_flags(const char *name,
                        &imx_ccm_lock);
 }
 
+static inline struct clk_hw *imx_clk_hw_mux2_flags(const char *name,
+               void __iomem *reg, u8 shift, u8 width,
+               const char * const *parents,
+               int num_parents, unsigned long flags)
+{
+       return clk_hw_register_mux(NULL, name, parents, num_parents,
+                       flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
+                       reg, shift, width, 0, &imx_ccm_lock);
+}
+
 static inline struct clk *imx_clk_mux2_flags(const char *name,
                void __iomem *reg, u8 shift, u8 width,
                const char * const *parents,
@@ -446,23 +477,38 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
                struct clk *div, struct clk *mux, struct clk *pll,
                struct clk *step);
 
-struct clk *imx8m_clk_composite_flags(const char *name,
-                                       const char * const *parent_names,
-                                       int num_parents, void __iomem *reg,
-                                       unsigned long flags);
+struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
+                                           const char * const *parent_names,
+                                           int num_parents,
+                                           void __iomem *reg,
+                                           unsigned long flags);
 
-#define __imx8m_clk_composite(name, parent_names, reg, flags) \
-       imx8m_clk_composite_flags(name, parent_names, \
+#define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \
+                                 flags) \
+       to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \
+                               num_parents, reg, flags))
+
+#define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \
+       imx8m_clk_hw_composite_flags(name, parent_names, \
                ARRAY_SIZE(parent_names), reg, \
                flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
 
+#define __imx8m_clk_composite(name, parent_names, reg, flags) \
+       to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
+
+#define imx8m_clk_hw_composite(name, parent_names, reg) \
+       __imx8m_clk_hw_composite(name, parent_names, reg, 0)
+
 #define imx8m_clk_composite(name, parent_names, reg) \
        __imx8m_clk_composite(name, parent_names, reg, 0)
 
+#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
+       __imx8m_clk_hw_composite(name, parent_names, reg, CLK_IS_CRITICAL)
+
 #define imx8m_clk_composite_critical(name, parent_names, reg) \
        __imx8m_clk_composite(name, parent_names, reg, CLK_IS_CRITICAL)
 
-struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
                unsigned long flags, void __iomem *reg, u8 shift, u8 width,
                u8 clk_divider_flags, const struct clk_div_table *table,
                spinlock_t *lock);
index 7efc361..ea3c70d 100644 (file)
@@ -174,36 +174,36 @@ config COMMON_CLK_MT6779_AUDSYS
          This driver supports Mediatek MT6779 audsys clocks.
 
 config COMMON_CLK_MT6797
-       bool "Clock driver for MediaTek MT6797"
-       depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
-       select COMMON_CLK_MEDIATEK
-       default ARCH_MEDIATEK && ARM64
-       ---help---
-         This driver supports MediaTek MT6797 basic clocks.
+       bool "Clock driver for MediaTek MT6797"
+       depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+       select COMMON_CLK_MEDIATEK
+       default ARCH_MEDIATEK && ARM64
+       ---help---
+         This driver supports MediaTek MT6797 basic clocks.
 
 config COMMON_CLK_MT6797_MMSYS
-       bool "Clock driver for MediaTek MT6797 mmsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 mmsys clocks.
+       bool "Clock driver for MediaTek MT6797 mmsys"
+       depends on COMMON_CLK_MT6797
+       ---help---
+         This driver supports MediaTek MT6797 mmsys clocks.
 
 config COMMON_CLK_MT6797_IMGSYS
-       bool "Clock driver for MediaTek MT6797 imgsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 imgsys clocks.
+       bool "Clock driver for MediaTek MT6797 imgsys"
+       depends on COMMON_CLK_MT6797
+       ---help---
+         This driver supports MediaTek MT6797 imgsys clocks.
 
 config COMMON_CLK_MT6797_VDECSYS
-       bool "Clock driver for MediaTek MT6797 vdecsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 vdecsys clocks.
+       bool "Clock driver for MediaTek MT6797 vdecsys"
+       depends on COMMON_CLK_MT6797
+       ---help---
+         This driver supports MediaTek MT6797 vdecsys clocks.
 
 config COMMON_CLK_MT6797_VENCSYS
-       bool "Clock driver for MediaTek MT6797 vencsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 vencsys clocks.
+       bool "Clock driver for MediaTek MT6797 vencsys"
+       depends on COMMON_CLK_MT6797
+       ---help---
+         This driver supports MediaTek MT6797 vencsys clocks.
 
 config COMMON_CLK_MT7622
        bool "Clock driver for MediaTek MT7622"
index 3939f21..6eca2a4 100644 (file)
@@ -18,4 +18,4 @@ obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
 obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
 obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
 obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
-obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
+obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o meson8-ddr.o
index 2d39a8b..fc9df48 100644 (file)
@@ -129,7 +129,7 @@ static int mpll_set_rate(struct clk_hw *hw,
        return 0;
 }
 
-static void mpll_init(struct clk_hw *hw)
+static int mpll_init(struct clk_hw *hw)
 {
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
@@ -151,6 +151,8 @@ static void mpll_init(struct clk_hw *hw)
        /* Set the magic misc bit if required */
        if (MESON_PARM_APPLICABLE(&mpll->misc))
                meson_parm_write(clk->map, &mpll->misc, 1);
+
+       return 0;
 }
 
 const struct clk_ops meson_clk_mpll_ro_ops = {
index 80c3ada..fe22e17 100644 (file)
@@ -78,7 +78,7 @@ meson_clk_triphase_data(struct clk_regmap *clk)
        return (struct meson_clk_triphase_data *)clk->data;
 }
 
-static void meson_clk_triphase_sync(struct clk_hw *hw)
+static int meson_clk_triphase_sync(struct clk_hw *hw)
 {
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
@@ -88,6 +88,8 @@ static void meson_clk_triphase_sync(struct clk_hw *hw)
        val = meson_parm_read(clk->map, &tph->ph0);
        meson_parm_write(clk->map, &tph->ph1, val);
        meson_parm_write(clk->map, &tph->ph2, val);
+
+       return 0;
 }
 
 static int meson_clk_triphase_get_phase(struct clk_hw *hw)
index ddb1e56..b17a13e 100644 (file)
@@ -77,6 +77,15 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
        unsigned int m, n, frac;
 
        n = meson_parm_read(clk->map, &pll->n);
+
+       /*
+        * On some HW, N is set to zero on init. This value is invalid as
+        * it would result in a division by zero. The rate can't be
+        * calculated in this case
+        */
+       if (n == 0)
+               return 0;
+
        m = meson_parm_read(clk->map, &pll->m);
 
        frac = MESON_PARM_APPLICABLE(&pll->frac) ?
@@ -277,7 +286,7 @@ static int meson_clk_pll_wait_lock(struct clk_hw *hw)
        return -ETIMEDOUT;
 }
 
-static void meson_clk_pll_init(struct clk_hw *hw)
+static int meson_clk_pll_init(struct clk_hw *hw)
 {
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
@@ -288,6 +297,8 @@ static void meson_clk_pll_init(struct clk_hw *hw)
                                       pll->init_count);
                meson_parm_write(clk->map, &pll->rst, 0);
        }
+
+       return 0;
 }
 
 static int meson_clk_pll_is_enabled(struct clk_hw *hw)
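
The void-to-int conversions in mpll_init(), meson_clk_triphase_sync() and
meson_clk_pll_init() above track a clk framework change in this cycle that lets the
.init hook report failure. A minimal sketch of the updated callback shape (inferred from
these hunks rather than quoted from clk-provider.h):

    static int example_clk_init(struct clk_hw *hw)
    {
            /* one-time hardware setup; errors are now propagated, not dropped */
            return 0;
    }

    static const struct clk_ops example_clk_ops = {
            .init = example_clk_init,
    };
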
index b3af61c..d2760a0 100644 (file)
@@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
        &g12a_bt656,
        &g12a_usb1_to_ddr,
        &g12a_mmc_pclk,
+       &g12a_uart2,
        &g12a_vpu_intr,
        &g12a_gic,
        &g12a_sd_emmc_a_clk0,
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
new file mode 100644 (file)
index 0000000..4b73ea2
--- /dev/null
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson8 DDR clock controller
+ *
+ * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <dt-bindings/clock/meson8-ddr-clkc.h>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+
+#define AM_DDR_PLL_CNTL                        0x00
+#define AM_DDR_PLL_CNTL1               0x04
+#define AM_DDR_PLL_CNTL2               0x08
+#define AM_DDR_PLL_CNTL3               0x0c
+#define AM_DDR_PLL_CNTL4               0x10
+#define AM_DDR_PLL_STS                 0x14
+#define DDR_CLK_CNTL                   0x18
+#define DDR_CLK_STS                    0x1c
+
+static struct clk_regmap meson8_ddr_pll_dco = {
+       .data = &(struct meson_clk_pll_data){
+               .en = {
+                       .reg_off = AM_DDR_PLL_CNTL,
+                       .shift   = 30,
+                       .width   = 1,
+               },
+               .m = {
+                       .reg_off = AM_DDR_PLL_CNTL,
+                       .shift   = 0,
+                       .width   = 9,
+               },
+               .n = {
+                       .reg_off = AM_DDR_PLL_CNTL,
+                       .shift   = 9,
+                       .width   = 5,
+               },
+               .l = {
+                       .reg_off = AM_DDR_PLL_CNTL,
+                       .shift   = 31,
+                       .width   = 1,
+               },
+               .rst = {
+                       .reg_off = AM_DDR_PLL_CNTL,
+                       .shift   = 29,
+                       .width   = 1,
+               },
+       },
+       .hw.init = &(struct clk_init_data){
+               .name = "ddr_pll_dco",
+               .ops = &meson_clk_pll_ro_ops,
+               .parent_data = &(const struct clk_parent_data) {
+                       .fw_name = "xtal",
+               },
+               .num_parents = 1,
+       },
+};
+
+static struct clk_regmap meson8_ddr_pll = {
+       .data = &(struct clk_regmap_div_data){
+               .offset = AM_DDR_PLL_CNTL,
+               .shift = 16,
+               .width = 2,
+               .flags = CLK_DIVIDER_POWER_OF_TWO,
+       },
+       .hw.init = &(struct clk_init_data){
+               .name = "ddr_pll",
+               .ops = &clk_regmap_divider_ro_ops,
+               .parent_hws = (const struct clk_hw *[]) {
+                       &meson8_ddr_pll_dco.hw
+               },
+               .num_parents = 1,
+       },
+};
+
+static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
+       .hws = {
+               [DDR_CLKID_DDR_PLL_DCO]         = &meson8_ddr_pll_dco.hw,
+               [DDR_CLKID_DDR_PLL]             = &meson8_ddr_pll.hw,
+       },
+       .num = 2,
+};
+
+static struct clk_regmap *const meson8_ddr_clk_regmaps[] = {
+       &meson8_ddr_pll_dco,
+       &meson8_ddr_pll,
+};
+
+static const struct regmap_config meson8_ddr_clkc_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = DDR_CLK_STS,
+};
+
+static int meson8_ddr_clkc_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+       void __iomem *base;
+       struct clk_hw *hw;
+       int ret, i;
+
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       regmap = devm_regmap_init_mmio(&pdev->dev, base,
+                                      &meson8_ddr_clkc_regmap_config);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* Populate regmap */
+       for (i = 0; i < ARRAY_SIZE(meson8_ddr_clk_regmaps); i++)
+               meson8_ddr_clk_regmaps[i]->map = regmap;
+
+       /* Register all clks */
+       for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
+               hw = meson8_ddr_clk_hw_onecell_data.hws[i];
+
+               ret = devm_clk_hw_register(&pdev->dev, hw);
+               if (ret) {
+                       dev_err(&pdev->dev, "Clock registration failed\n");
+                       return ret;
+               }
+       }
+
+       return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+                                          &meson8_ddr_clk_hw_onecell_data);
+}
+
+static const struct of_device_id meson8_ddr_clkc_match_table[] = {
+       { .compatible = "amlogic,meson8-ddr-clkc" },
+       { .compatible = "amlogic,meson8b-ddr-clkc" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver meson8_ddr_clkc_driver = {
+       .probe          = meson8_ddr_clkc_probe,
+       .driver         = {
+               .name   = "meson8-ddr-clkc",
+               .of_match_table = meson8_ddr_clkc_match_table,
+       },
+};
+
+builtin_platform_driver(meson8_ddr_clkc_driver);
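
With the onecell provider registered above, consumers can reference the two read-only
clocks through the DDR_CLKID_* indices from dt-bindings/clock/meson8-ddr-clkc.h. A
hypothetical consumer sketch (device and clock-names entry invented for illustration):

    /* hypothetical consumer: look up and report the DDR PLL rate */
    struct clk *ddr_pll = devm_clk_get(&pdev->dev, "ddr_pll");

    if (IS_ERR(ddr_pll))
            return PTR_ERR(ddr_pll);

    dev_info(&pdev->dev, "DDR PLL at %lu Hz\n", clk_get_rate(ddr_pll));
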
index 67e6691..9fd31f2 100644 (file)
@@ -97,8 +97,10 @@ static struct clk_regmap meson8b_fixed_pll_dco = {
        .hw.init = &(struct clk_init_data){
                .name = "fixed_pll_dco",
                .ops = &meson_clk_pll_ro_ops,
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_xtal.hw
+               .parent_data = &(const struct clk_parent_data) {
+                       .fw_name = "xtal",
+                       .name = "xtal",
+                       .index = -1,
                },
                .num_parents = 1,
        },
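
This hunk, and the matching ones below, replace the hard-wired pointer to
meson8b_xtal with a clk_parent_data entry so the crystal can be taken from a
DT-supplied "xtal" input while older device trees still resolve it by its global name.
Roughly, the clk core tries the fields of such an entry in the following order (a sketch
from general knowledge of the framework, not of this patch):

    /* sketch: parent resolution order for a clk_parent_data entry */
    static const struct clk_parent_data example_parent = {
            .hw      = NULL,    /* direct clk_hw pointer, used first when set        */
            .fw_name = "xtal",  /* matched against the node's clock-names property   */
            .index   = -1,      /* positional "clocks" index; -1 disables the lookup */
            .name    = "xtal",  /* legacy global clock name, used as a last resort   */
    };
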
@@ -162,8 +164,10 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
                /* sometimes also called "HPLL" or "HPLL PLL" */
                .name = "hdmi_pll_dco",
                .ops = &meson_clk_pll_ro_ops,
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_xtal.hw
+               .parent_data = &(const struct clk_parent_data) {
+                       .fw_name = "xtal",
+                       .name = "xtal",
+                       .index = -1,
                },
                .num_parents = 1,
        },
@@ -237,8 +241,10 @@ static struct clk_regmap meson8b_sys_pll_dco = {
        .hw.init = &(struct clk_init_data){
                .name = "sys_pll_dco",
                .ops = &meson_clk_pll_ops,
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_xtal.hw
+               .parent_data = &(const struct clk_parent_data) {
+                       .fw_name = "xtal",
+                       .name = "xtal",
+                       .index = -1,
                },
                .num_parents = 1,
        },
@@ -631,9 +637,9 @@ static struct clk_regmap meson8b_cpu_in_sel = {
        .hw.init = &(struct clk_init_data){
                .name = "cpu_in_sel",
                .ops = &clk_regmap_mux_ops,
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_xtal.hw,
-                       &meson8b_sys_pll.hw,
+               .parent_data = (const struct clk_parent_data[]) {
+                       { .fw_name = "xtal", .name = "xtal", .index = -1, },
+                       { .hw = &meson8b_sys_pll.hw, },
                },
                .num_parents = 2,
                .flags = (CLK_SET_RATE_PARENT |
@@ -736,9 +742,9 @@ static struct clk_regmap meson8b_cpu_clk = {
        .hw.init = &(struct clk_init_data){
                .name = "cpu_clk",
                .ops = &clk_regmap_mux_ops,
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_xtal.hw,
-                       &meson8b_cpu_scale_out_sel.hw,
+               .parent_data = (const struct clk_parent_data[]) {
+                       { .fw_name = "xtal", .name = "xtal", .index = -1, },
+                       { .hw = &meson8b_cpu_scale_out_sel.hw, },
                },
                .num_parents = 2,
                .flags = (CLK_SET_RATE_PARENT |
@@ -758,12 +764,12 @@ static struct clk_regmap meson8b_nand_clk_sel = {
                .name = "nand_clk_sel",
                .ops = &clk_regmap_mux_ops,
                /* FIXME all other parents are unknown: */
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_fclk_div4.hw,
-                       &meson8b_fclk_div3.hw,
-                       &meson8b_fclk_div5.hw,
-                       &meson8b_fclk_div7.hw,
-                       &meson8b_xtal.hw,
+               .parent_data = (const struct clk_parent_data[]) {
+                       { .hw = &meson8b_fclk_div4.hw, },
+                       { .hw = &meson8b_fclk_div3.hw, },
+                       { .hw = &meson8b_fclk_div5.hw, },
+                       { .hw = &meson8b_fclk_div7.hw, },
+                       { .fw_name = "xtal", .name = "xtal", .index = -1, },
                },
                .num_parents = 5,
                .flags = CLK_SET_RATE_PARENT,
@@ -1721,8 +1727,10 @@ static struct clk_regmap meson8b_hdmi_sys_sel = {
                .name = "hdmi_sys_sel",
                .ops = &clk_regmap_mux_ro_ops,
                /* FIXME: all other parents are unknown */
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_xtal.hw
+               .parent_data = &(const struct clk_parent_data) {
+                       .fw_name = "xtal",
+                       .name = "xtal",
+                       .index = -1,
                },
                .num_parents = 1,
                .flags = CLK_SET_RATE_NO_REPARENT,
@@ -1764,17 +1772,20 @@ static struct clk_regmap meson8b_hdmi_sys = {
 
 /*
  * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
- * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
- * has mali_0 and no glitch-free mux.
+ * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
+ * actually manage this glitch-free mux because it does top-to-bottom
+ * updates of each clock tree and switches to the "inactive" one when
+ * CLK_SET_RATE_GATE is set.
+ * Meson8 only has mali_0 and no glitch-free mux.
  */
-static const struct clk_hw *meson8b_mali_0_1_parent_hws[] = {
-       &meson8b_xtal.hw,
-       &meson8b_mpll2.hw,
-       &meson8b_mpll1.hw,
-       &meson8b_fclk_div7.hw,
-       &meson8b_fclk_div4.hw,
-       &meson8b_fclk_div3.hw,
-       &meson8b_fclk_div5.hw,
+static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
+       { .fw_name = "xtal", .name = "xtal", .index = -1, },
+       { .hw = &meson8b_mpll2.hw, },
+       { .hw = &meson8b_mpll1.hw, },
+       { .hw = &meson8b_fclk_div7.hw, },
+       { .hw = &meson8b_fclk_div4.hw, },
+       { .hw = &meson8b_fclk_div3.hw, },
+       { .hw = &meson8b_fclk_div5.hw, },
 };
 
 static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
@@ -1789,8 +1800,8 @@ static struct clk_regmap meson8b_mali_0_sel = {
        .hw.init = &(struct clk_init_data){
                .name = "mali_0_sel",
                .ops = &clk_regmap_mux_ops,
-               .parent_hws = meson8b_mali_0_1_parent_hws,
-               .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
+               .parent_data = meson8b_mali_0_1_parent_data,
+               .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
                /*
                 * Don't propagate rate changes up because the only changeable
                 * parents are mpll1 and mpll2 but we need those for audio and
@@ -1830,7 +1841,7 @@ static struct clk_regmap meson8b_mali_0 = {
                        &meson8b_mali_0_div.hw
                },
                .num_parents = 1,
-               .flags = CLK_SET_RATE_PARENT,
+               .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
        },
 };
 
@@ -1844,8 +1855,8 @@ static struct clk_regmap meson8b_mali_1_sel = {
        .hw.init = &(struct clk_init_data){
                .name = "mali_1_sel",
                .ops = &clk_regmap_mux_ops,
-               .parent_hws = meson8b_mali_0_1_parent_hws,
-               .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
+               .parent_data = meson8b_mali_0_1_parent_data,
+               .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
                /*
                 * Don't propagate rate changes up because the only changeable
                 * parents are mpll1 and mpll2 but we need those for audio and
@@ -1885,7 +1896,7 @@ static struct clk_regmap meson8b_mali_1 = {
                        &meson8b_mali_1_div.hw
                },
                .num_parents = 1,
-               .flags = CLK_SET_RATE_PARENT,
+               .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
        },
 };
 
@@ -1944,8 +1955,10 @@ static struct clk_regmap meson8m2_gp_pll_dco = {
        .hw.init = &(struct clk_init_data){
                .name = "gp_pll_dco",
                .ops = &meson_clk_pll_ops,
-               .parent_hws = (const struct clk_hw *[]) {
-                       &meson8b_xtal.hw
+               .parent_data = &(const struct clk_parent_data) {
+                       .fw_name = "xtal",
+                       .name = "xtal",
+                       .index = -1,
                },
                .num_parents = 1,
        },
@@ -3585,7 +3598,7 @@ static const struct reset_control_ops meson8b_clk_reset_ops = {
 
 struct meson8b_nb_data {
        struct notifier_block nb;
-       struct clk_hw_onecell_data *onecell_data;
+       struct clk_hw *cpu_clk;
 };
 
 static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
@@ -3593,30 +3606,25 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
 {
        struct meson8b_nb_data *nb_data =
                container_of(nb, struct meson8b_nb_data, nb);
-       struct clk_hw **hws = nb_data->onecell_data->hws;
-       struct clk_hw *cpu_clk_hw, *parent_clk_hw;
-       struct clk *cpu_clk, *parent_clk;
+       struct clk_hw *parent_clk;
        int ret;
 
        switch (event) {
        case PRE_RATE_CHANGE:
-               parent_clk_hw = hws[CLKID_XTAL];
+               /* xtal */
+               parent_clk = clk_hw_get_parent_by_index(nb_data->cpu_clk, 0);
                break;
 
        case POST_RATE_CHANGE:
-               parent_clk_hw = hws[CLKID_CPU_SCALE_OUT_SEL];
+               /* cpu_scale_out_sel */
+               parent_clk = clk_hw_get_parent_by_index(nb_data->cpu_clk, 1);
                break;
 
        default:
                return NOTIFY_DONE;
        }
 
-       cpu_clk_hw = hws[CLKID_CPUCLK];
-       cpu_clk = __clk_lookup(clk_hw_get_name(cpu_clk_hw));
-
-       parent_clk = __clk_lookup(clk_hw_get_name(parent_clk_hw));
-
-       ret = clk_set_parent(cpu_clk, parent_clk);
+       ret = clk_hw_set_parent(nb_data->cpu_clk, parent_clk);
        if (ret)
                return notifier_from_errno(ret);
 
@@ -3682,20 +3690,26 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
                meson8b_clk_regmaps[i]->map = map;
 
        /*
-        * register all clks
-        * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
+        * always skip CLKID_UNUSED and also skip XTAL if the .dtb provides the
+        * XTAL clock as input.
         */
-       for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
+       if (!IS_ERR(of_clk_get_by_name(np, "xtal")))
+               i = CLKID_PLL_FIXED;
+       else
+               i = CLKID_XTAL;
+
+       /* register all clks */
+       for (; i < CLK_NR_CLKS; i++) {
                /* array might be sparse */
                if (!clk_hw_onecell_data->hws[i])
                        continue;
 
-               ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]);
+               ret = of_clk_hw_register(np, clk_hw_onecell_data->hws[i]);
                if (ret)
                        return;
        }
 
-       meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data;
+       meson8b_cpu_nb_data.cpu_clk = clk_hw_onecell_data->hws[CLKID_CPUCLK];
 
        /*
         * FIXME we shouldn't program the muxes in notifier handlers. The
index 3acf037..76d31c0 100644 (file)
@@ -216,7 +216,7 @@ static int sclk_div_is_enabled(struct clk_hw *hw)
        return 0;
 }
 
-static void sclk_div_init(struct clk_hw *hw)
+static int sclk_div_init(struct clk_hw *hw)
 {
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
@@ -231,6 +231,8 @@ static void sclk_div_init(struct clk_hw *hw)
                sclk->cached_div = val + 1;
 
        sclk_div_get_duty_cycle(hw, &sclk->cached_duty);
+
+       return 0;
 }
 
 const struct clk_ops meson_sclk_div_ops = {
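
[Editor's note] This hunk and the several that follow (roclk_init, sclk_init, clk_factor_init, mmp_clk_mix_init, clk_hfpll_init) are the same mechanical conversion: the clk_ops .init callback now returns int instead of void, so a provider can report a setup failure at registration time. A minimal sketch of the new shape, using a hypothetical foo_ prefix:

#include <linux/clk-provider.h>

/*
 * .init now returns 0 on success or a negative errno, which the clk core
 * can use to abort registration instead of continuing with a clock that
 * was never properly set up.
 */
static int foo_clk_init(struct clk_hw *hw)
{
	/* read back and cache hardware state here */
	return 0;
}

static const struct clk_ops foo_clk_ops = {
	.init = foo_clk_init,
};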
index 567755d..1b4f023 100644 (file)
@@ -266,10 +266,12 @@ static void roclk_disable(struct clk_hw *hw)
        writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
 }
 
-static void roclk_init(struct clk_hw *hw)
+static int roclk_init(struct clk_hw *hw)
 {
        /* initialize clock in disabled state */
        roclk_disable(hw);
+
+       return 0;
 }
 
 static u8 roclk_get_parent(struct clk_hw *hw)
@@ -880,7 +882,7 @@ static int sclk_set_parent(struct clk_hw *hw, u8 index)
        return err;
 }
 
-static void sclk_init(struct clk_hw *hw)
+static int sclk_init(struct clk_hw *hw)
 {
        struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
        unsigned long flags;
@@ -899,6 +901,8 @@ static void sclk_init(struct clk_hw *hw)
                writel(v, sclk->slew_reg);
                spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
        }
+
+       return 0;
 }
 
 /* sclk with post-divider */
index 90bf181..fabc09a 100644 (file)
@@ -109,7 +109,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
        return 0;
 }
 
-static void clk_factor_init(struct clk_hw *hw)
+static int clk_factor_init(struct clk_hw *hw)
 {
        struct mmp_clk_factor *factor = to_clk_factor(hw);
        struct mmp_clk_factor_masks *masks = factor->masks;
@@ -146,6 +146,8 @@ static void clk_factor_init(struct clk_hw *hw)
 
        if (factor->lock)
                spin_unlock_irqrestore(factor->lock, flags);
+
+       return 0;
 }
 
 static const struct clk_ops clk_factor_ops = {
index 90814b2..d2cd36c 100644 (file)
@@ -419,12 +419,14 @@ static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
        }
 }
 
-static void mmp_clk_mix_init(struct clk_hw *hw)
+static int mmp_clk_mix_init(struct clk_hw *hw)
 {
        struct mmp_clk_mix *mix = to_clk_mix(hw);
 
        if (mix->table)
                _filter_clk_table(mix, mix->table, mix->table_size);
+
+       return 0;
 }
 
 const struct clk_ops mmp_clk_mix_ops = {
index 415e690..ded07b0 100644 (file)
@@ -29,7 +29,7 @@ config ARMADA_39X_CLK
        select MVEBU_CLK_COMMON
 
 config ARMADA_37XX_CLK
-       bool
+       bool
 
 config ARMADA_XP_CLK
        bool
index 3b33ef1..15cdcdc 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config KRAIT_CLOCKS
-       bool
-       select KRAIT_L2_ACCESSORS
+       bool
+       select KRAIT_L2_ACCESSORS
 
 config QCOM_GDSC
        bool
@@ -14,6 +14,7 @@ menuconfig COMMON_CLK_QCOM
        tristate "Support for Qualcomm's clock controllers"
        depends on OF
        depends on ARCH_QCOM || COMPILE_TEST
+       select RATIONAL
        select REGMAP_MMIO
        select RESET_CONTROLLER
 
@@ -95,6 +96,14 @@ config IPQ_GCC_4019
          Say Y if you want to use peripheral devices such as UART, SPI,
          i2c, USB, SD/eMMC, etc.
 
+config IPQ_GCC_6018
+       tristate "IPQ6018 Global Clock Controller"
+       help
+         Support for the global clock controller on ipq6018 devices.
+         Say Y if you want to use peripheral devices such as UART, SPI,
+         i2c, USB, SD/eMMC, etc. Select this for the root clock
+         of ipq6018.
+
 config IPQ_GCC_806X
        tristate "IPQ806x Global Clock Controller"
        help
@@ -229,6 +238,15 @@ config MSM_GPUCC_8998
          Say Y if you want to support graphics controller devices and
          functionality such as 3D graphics.
 
+config MSM_MMCC_8998
+       tristate "MSM8998 Multimedia Clock Controller"
+       select MSM_GCC_8998
+       select QCOM_GDSC
+       help
+         Support for the multimedia clock controller on msm8998 devices.
+         Say Y if you want to support multimedia devices such as display,
+         graphics, video encode/decode, camera, etc.
+
 config QCS_GCC_404
        tristate "QCS404 Global Clock Controller"
        help
@@ -236,6 +254,15 @@ config QCS_GCC_404
          Say Y if you want to use multimedia devices or peripheral
          devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
 
+config SC_DISPCC_7180
+       tristate "SC7180 Display Clock Controller"
+       select SC_GCC_7180
+       help
+         Support for the display clock controller on Qualcomm Technologies, Inc.
+         SC7180 devices.
+         Say Y if you want to support display devices and functionality such as
+         splash screen.
+
 config SC_GCC_7180
        tristate "SC7180 Global Clock Controller"
        select QCOM_GDSC
@@ -245,6 +272,22 @@ config SC_GCC_7180
          Say Y if you want to use peripheral devices such as UART, SPI,
          I2C, USB, UFS, SDCC, etc.
 
+config SC_GPUCC_7180
+       tristate "SC7180 Graphics Clock Controller"
+       select SC_GCC_7180
+       help
+         Support for the graphics clock controller on SC7180 devices.
+         Say Y if you want to support graphics controller devices and
+         functionality such as 3D graphics.
+
+config SC_VIDEOCC_7180
+       tristate "SC7180 Video Clock Controller"
+       select SC_GCC_7180
+       help
+         Support for the video clock controller on SC7180 devices.
+         Say Y if you want to support video devices and functionality such as
+         video encode and decode.
+
 config SDM_CAMCC_845
        tristate "SDM845 Camera Clock Controller"
        select SDM_GCC_845
index d899661..656a87e 100644 (file)
@@ -20,6 +20,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
 obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_MSM_GPUCC_8998) += gpucc-msm8998.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o
 obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
 obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
 obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
@@ -45,7 +47,10 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
 obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
 obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
 obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
 obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
+obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
 obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
 obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
 obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
index a6c89a3..cf69a97 100644 (file)
@@ -19,9 +19,9 @@
 
 static const u32 gpll0_a53cc_map[] = { 4, 5 };
 
-static const char * const gpll0_a53cc[] = {
-       "gpll0_vote",
-       "a53pll",
+static const struct clk_parent_data pdata[] = {
+       { .fw_name = "aux", .name = "gpll0_vote", },
+       { .fw_name = "pll", .name = "a53pll", },
 };
 
 /*
@@ -62,8 +62,8 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        init.name = "a53mux";
-       init.parent_names = gpll0_a53cc;
-       init.num_parents = ARRAY_SIZE(gpll0_a53cc);
+       init.parent_data = pdata;
+       init.num_parents = ARRAY_SIZE(pdata);
        init.ops = &clk_regmap_mux_div_ops;
        init.flags = CLK_SET_RATE_PARENT;
 
@@ -79,7 +79,8 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
        a53cc->pclk = devm_clk_get(parent, NULL);
        if (IS_ERR(a53cc->pclk)) {
                ret = PTR_ERR(a53cc->pclk);
-               dev_err(dev, "failed to get clk: %d\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "failed to get clk: %d\n", ret);
                return ret;
        }
 
index 055318f..7c2936d 100644 (file)
@@ -878,6 +878,14 @@ static long clk_trion_pll_round_rate(struct clk_hw *hw, unsigned long rate,
        return clamp(rate, min_freq, max_freq);
 }
 
+const struct clk_ops clk_alpha_pll_fixed_ops = {
+       .enable = clk_alpha_pll_enable,
+       .disable = clk_alpha_pll_disable,
+       .is_enabled = clk_alpha_pll_is_enabled,
+       .recalc_rate = clk_alpha_pll_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_ops);
+
 const struct clk_ops clk_alpha_pll_ops = {
        .enable = clk_alpha_pll_enable,
        .disable = clk_alpha_pll_disable,
@@ -1024,6 +1032,25 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                regmap_write(regmap, PLL_CONFIG_CTL(pll),
                                                config->config_ctl_val);
 
+       if (config->config_ctl_hi_val)
+               regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+                                               config->config_ctl_hi_val);
+
+       if (config->user_ctl_val)
+               regmap_write(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+
+       if (config->user_ctl_hi_val)
+               regmap_write(regmap, PLL_USER_CTL_U(pll),
+                                               config->user_ctl_hi_val);
+
+       if (config->test_ctl_val)
+               regmap_write(regmap, PLL_TEST_CTL(pll),
+                                               config->test_ctl_val);
+
+       if (config->test_ctl_hi_val)
+               regmap_write(regmap, PLL_TEST_CTL_U(pll),
+                                               config->test_ctl_hi_val);
+
        if (config->post_div_mask) {
                mask = config->post_div_mask;
                val = config->post_div_val;
@@ -1141,14 +1168,9 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
                                                unsigned long prate)
 {
        struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
-       u32 val, l, alpha_width = pll_alpha_width(pll);
+       u32 l, alpha_width = pll_alpha_width(pll);
        u64 a;
        unsigned long rrate;
-       int ret = 0;
-
-       ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
-       if (ret)
-               return ret;
 
        rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
 
@@ -1167,7 +1189,64 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
        return __clk_alpha_pll_update_latch(pll);
 }
 
+static int alpha_pll_fabia_prepare(struct clk_hw *hw)
+{
+       struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+       const struct pll_vco *vco;
+       struct clk_hw *parent_hw;
+       unsigned long cal_freq, rrate;
+       u32 cal_l, val, alpha_width = pll_alpha_width(pll);
+       u64 a;
+       int ret;
+
+       /* Check if calibration needs to be done i.e. PLL is in reset */
+       ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+       if (ret)
+               return ret;
+
+       /* Return early if calibration is not needed. */
+       if (val & PLL_RESET_N)
+               return 0;
+
+       vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+       if (!vco) {
+               pr_err("alpha pll: not in a valid vco range\n");
+               return -EINVAL;
+       }
+
+       cal_freq = DIV_ROUND_CLOSEST((pll->vco_table[0].min_freq +
+                               pll->vco_table[0].max_freq) * 54, 100);
+
+       parent_hw = clk_hw_get_parent(hw);
+       if (!parent_hw)
+               return -EINVAL;
+
+       rrate = alpha_pll_round_rate(cal_freq, clk_hw_get_rate(parent_hw),
+                                       &cal_l, &a, alpha_width);
+       /*
+        * Due to a limited number of bits for fractional rate programming, the
+        * rounded up rate could be marginally higher than the requested rate.
+        */
+       if (rrate > (cal_freq + FABIA_PLL_RATE_MARGIN) || rrate < cal_freq)
+               return -EINVAL;
+
+       /* Setup PLL for calibration frequency */
+       regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
+
+       /* Bring up the PLL at the calibration frequency */
+       ret = clk_alpha_pll_enable(hw);
+       if (ret) {
+               pr_err("alpha pll calibration failed\n");
+               return ret;
+       }
+
+       clk_alpha_pll_disable(hw);
+
+       return 0;
+}
+
 const struct clk_ops clk_alpha_pll_fabia_ops = {
+       .prepare = alpha_pll_fabia_prepare,
        .enable = alpha_pll_fabia_enable,
        .disable = alpha_pll_fabia_disable,
        .is_enabled = clk_alpha_pll_is_enabled,
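
[Editor's note] As a concrete instance of the calibration frequency picked by alpha_pll_fabia_prepare() above (worked example only, using the Fabia VCO table that the new SC7180 display clock controller later in this series declares as { 249600000, 2000000000 }):

	cal_freq = DIV_ROUND_CLOSEST((249600000 + 2000000000) * 54, 100)
	         = 1214784000 Hz

which lands well inside that 249.6 MHz to 2 GHz VCO range before the PLL is brought up at the calibration frequency and shut down again.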
index 15f27f4..fbc1f67 100644 (file)
@@ -94,6 +94,10 @@ struct alpha_pll_config {
        u32 alpha_hi;
        u32 config_ctl_val;
        u32 config_ctl_hi_val;
+       u32 user_ctl_val;
+       u32 user_ctl_hi_val;
+       u32 test_ctl_val;
+       u32 test_ctl_hi_val;
        u32 main_output_mask;
        u32 aux_output_mask;
        u32 aux2_output_mask;
@@ -109,6 +113,7 @@ struct alpha_pll_config {
 };
 
 extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_ops;
 extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
 extern const struct clk_ops clk_alpha_pll_postdiv_ops;
 extern const struct clk_ops clk_alpha_pll_huayra_ops;
index 3c04805..e847d58 100644 (file)
@@ -196,7 +196,7 @@ static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
        return l_val * parent_rate;
 }
 
-static void clk_hfpll_init(struct clk_hw *hw)
+static int clk_hfpll_init(struct clk_hw *hw)
 {
        struct clk_hfpll *h = to_clk_hfpll(hw);
        struct hfpll_data const *hd = h->d;
@@ -206,7 +206,7 @@ static void clk_hfpll_init(struct clk_hw *hw)
        regmap_read(regmap, hd->mode_reg, &mode);
        if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
                __clk_hfpll_init_once(hw);
-               return;
+               return 0;
        }
 
        if (hd->status_reg) {
@@ -218,6 +218,8 @@ static void clk_hfpll_init(struct clk_hw *hw)
                        __clk_hfpll_init_once(hw);
                }
        }
+
+       return 0;
 }
 
 static int hfpll_is_enabled(struct clk_hw *hw)
index 78358b8..86d2b8b 100644 (file)
@@ -161,6 +161,7 @@ extern const struct clk_ops clk_byte2_ops;
 extern const struct clk_ops clk_pixel_ops;
 extern const struct clk_ops clk_gfx3d_ops;
 extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_dp_ops;
 
 struct clk_rcg_dfs_data {
        struct clk_rcg2 *rcg;
index 8f4b9be..da045b2 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/export.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
+#include <linux/rational.h>
 #include <linux/regmap.h>
 #include <linux/math64.h>
 #include <linux/slab.h>
@@ -1124,3 +1125,79 @@ int qcom_cc_register_rcg_dfs(struct regmap *regmap,
        return 0;
 }
 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
+
+static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long parent_rate)
+{
+       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+       struct freq_tbl f = { 0 };
+       u32 mask = BIT(rcg->hid_width) - 1;
+       u32 hid_div, cfg;
+       int i, num_parents = clk_hw_get_num_parents(hw);
+       unsigned long num, den;
+
+       rational_best_approximation(parent_rate, rate,
+                       GENMASK(rcg->mnd_width - 1, 0),
+                       GENMASK(rcg->mnd_width - 1, 0), &den, &num);
+
+       if (!num || !den)
+               return -EINVAL;
+
+       regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+       hid_div = cfg;
+       cfg &= CFG_SRC_SEL_MASK;
+       cfg >>= CFG_SRC_SEL_SHIFT;
+
+       for (i = 0; i < num_parents; i++) {
+               if (cfg == rcg->parent_map[i].cfg) {
+                       f.src = rcg->parent_map[i].src;
+                       break;
+               }
+       }
+
+       f.pre_div = hid_div;
+       f.pre_div >>= CFG_SRC_DIV_SHIFT;
+       f.pre_div &= mask;
+
+       if (num != den) {
+               f.m = num;
+               f.n = den;
+       } else {
+               f.m = 0;
+               f.n = 0;
+       }
+
+       return clk_rcg2_configure(rcg, &f);
+}
+
+static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
+               unsigned long rate, unsigned long parent_rate, u8 index)
+{
+       return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
+                               struct clk_rate_request *req)
+{
+       struct clk_rate_request parent_req = *req;
+       int ret;
+
+       ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
+       if (ret)
+               return ret;
+
+       req->best_parent_rate = parent_req.rate;
+
+       return 0;
+}
+
+const struct clk_ops clk_dp_ops = {
+       .is_enabled = clk_rcg2_is_enabled,
+       .get_parent = clk_rcg2_get_parent,
+       .set_parent = clk_rcg2_set_parent,
+       .recalc_rate = clk_rcg2_recalc_rate,
+       .set_rate = clk_rcg2_dp_set_rate,
+       .set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
+       .determine_rate = clk_rcg2_dp_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_dp_ops);
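
[Editor's note] clk_rcg2_dp_set_rate() above derives the M/N counter values with rational_best_approximation() from <linux/rational.h>, which is why the Kconfig hunk earlier adds "select RATIONAL" to COMMON_CLK_QCOM. A standalone sketch of that helper, independent of the RCG register layout (the rates and the foo_ name below are made up for illustration):

#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/rational.h>

/*
 * Approximate target_rate / parent_rate as n/d, with both terms limited
 * to what a 16-bit M/N counter can hold, the same constraint the DP RCG
 * applies via GENMASK(rcg->mnd_width - 1, 0).
 */
static void foo_dp_mnd_example(void)
{
	unsigned long parent_rate = 810000000;	/* hypothetical link clock */
	unsigned long target_rate = 675000000;	/* hypothetical pixel clock */
	unsigned long n, d;

	rational_best_approximation(target_rate, parent_rate,
				    GENMASK(15, 0), GENMASK(15, 0), &n, &d);

	pr_info("pixel clock M/N approximation: %lu/%lu\n", n, d);	/* 5/6 */
}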
index 7ed313a..98a118c 100644 (file)
@@ -396,6 +396,7 @@ static struct clk_hw *sc7180_rpmh_clocks[] = {
        [RPMH_RF_CLK1_A]        = &sdm845_rf_clk1_ao.hw,
        [RPMH_RF_CLK2]          = &sdm845_rf_clk2.hw,
        [RPMH_RF_CLK2_A]        = &sdm845_rf_clk2_ao.hw,
+       [RPMH_IPA_CLK]          = &sdm845_ipa.hw,
 };
 
 static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
@@ -431,11 +432,16 @@ static int clk_rpmh_probe(struct platform_device *pdev)
        hw_clks = desc->clks;
 
        for (i = 0; i < desc->num_clks; i++) {
-               const char *name = hw_clks[i]->init->name;
+               const char *name;
                u32 res_addr;
                size_t aux_data_len;
                const struct bcm_db *data;
 
+               if (!hw_clks[i])
+                       continue;
+
+               name = hw_clks[i]->init->name;
+
                rpmh_clk = to_clk_rpmh(hw_clks[i]);
                res_addr = cmd_db_read_addr(rpmh_clk->res_name);
                if (!res_addr) {
@@ -481,9 +487,9 @@ static int clk_rpmh_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id clk_rpmh_match_table[] = {
+       { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
        { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
        { .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
-       { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
        { }
 };
 MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
index 930fa4a..0bbfef9 100644 (file)
@@ -485,6 +485,8 @@ static struct clk_smd_rpm *msm8974_clks[] = {
        [RPM_SMD_MMSSNOC_AHB_CLK]       = &msm8974_mmssnoc_ahb_clk,
        [RPM_SMD_MMSSNOC_AHB_A_CLK]     = &msm8974_mmssnoc_ahb_a_clk,
        [RPM_SMD_BIMC_CLK]              = &msm8974_bimc_clk,
+       [RPM_SMD_GFX3D_CLK_SRC]         = &msm8974_gfx3d_clk_src,
+       [RPM_SMD_GFX3D_A_CLK_SRC]       = &msm8974_gfx3d_a_clk_src,
        [RPM_SMD_BIMC_A_CLK]            = &msm8974_bimc_a_clk,
        [RPM_SMD_OCMEMGX_CLK]           = &msm8974_ocmemgx_clk,
        [RPM_SMD_OCMEMGX_A_CLK]         = &msm8974_ocmemgx_a_clk,
@@ -648,6 +650,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
 };
 
 /* msm8998 */
+DEFINE_CLK_SMD_RPM(msm8998, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
 DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
 DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
 DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
@@ -671,6 +674,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
 DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
 static struct clk_smd_rpm *msm8998_clks[] = {
+       [RPM_SMD_BIMC_CLK] = &msm8998_bimc_clk,
+       [RPM_SMD_BIMC_A_CLK] = &msm8998_bimc_a_clk,
        [RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
        [RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
        [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
new file mode 100644 (file)
index 0000000..30c1e25
--- /dev/null
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+       P_BI_TCXO,
+       P_CHIP_SLEEP_CLK,
+       P_CORE_BI_PLL_TEST_SE,
+       P_DISP_CC_PLL0_OUT_EVEN,
+       P_DISP_CC_PLL0_OUT_MAIN,
+       P_DP_PHY_PLL_LINK_CLK,
+       P_DP_PHY_PLL_VCO_DIV_CLK,
+       P_DSI0_PHY_PLL_OUT_BYTECLK,
+       P_DSI0_PHY_PLL_OUT_DSICLK,
+       P_GPLL0_OUT_MAIN,
+};
+
+static const struct pll_vco fabia_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+       .offset = 0x0,
+       .vco_table = fabia_vco,
+       .num_vco = ARRAY_SIZE(fabia_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_pll0",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fabia_ops,
+               },
+       },
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_even[] = {
+       { 0x0, 1 },
+       { }
+};
+
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_even = {
+       .offset = 0x0,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_disp_cc_pll0_out_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_pll0_out_even",
+               .parent_data = &(const struct clk_parent_data){
+                       .hw = &disp_cc_pll0.clkr.hw,
+               },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+       { P_BI_TCXO, 0 },
+       { P_DP_PHY_PLL_LINK_CLK, 1 },
+       { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dp_phy_pll_link_clk", .name = "dp_phy_pll_link_clk" },
+       { .fw_name = "dp_phy_pll_vco_div_clk",
+                               .name = "dp_phy_pll_vco_div_clk"},
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_byteclk",
+                               .name = "dsi0_phy_pll_out_byteclk" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+       { P_BI_TCXO, 0 },
+       { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+       { P_GPLL0_OUT_MAIN, 4 },
+       { P_DISP_CC_PLL0_OUT_EVEN, 5 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &disp_cc_pll0.clkr.hw },
+       { .fw_name = "gcc_disp_gpll0_clk_src" },
+       { .hw = &disp_cc_pll0_out_even.clkr.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+       { P_BI_TCXO, 0 },
+       { P_GPLL0_OUT_MAIN, 4 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "gcc_disp_gpll0_clk_src" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_dsiclk",
+                               .name = "dsi0_phy_pll_out_dsiclk" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+       F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+       .cmd_rcgr = 0x22bc,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_4,
+       .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_ahb_clk_src",
+               .parent_data = disp_cc_parent_data_4,
+               .num_parents = 3,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+       .cmd_rcgr = 0x2110,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_2,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_byte0_clk_src",
+               .parent_data = disp_cc_parent_data_2,
+               .num_parents = 3,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+       .cmd_rcgr = 0x21dc,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_aux_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+       .cmd_rcgr = 0x2194,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_crypto_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = 4,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+       .cmd_rcgr = 0x2178,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_link_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = 4,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+       .cmd_rcgr = 0x21ac,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_pixel_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = 4,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_dp_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+       .cmd_rcgr = 0x2148,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_2,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_esc0_clk_src",
+               .parent_data = disp_cc_parent_data_2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+       F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+       F(345000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+       F(460000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+       .cmd_rcgr = 0x20c8,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_3,
+       .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_mdp_clk_src",
+               .parent_data = disp_cc_parent_data_3,
+               .num_parents = 5,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+       .cmd_rcgr = 0x2098,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_5,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_pclk0_clk_src",
+               .parent_data = disp_cc_parent_data_5,
+               .num_parents = 3,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_pixel_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+       .cmd_rcgr = 0x20e0,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_3,
+       .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_rot_clk_src",
+               .parent_data = disp_cc_parent_data_3,
+               .num_parents = 5,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+       .cmd_rcgr = 0x20f8,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_vsync_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+       .halt_reg = 0x2080,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2080,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_ahb_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+       .halt_reg = 0x2028,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+       .reg = 0x2128,
+       .shift = 0,
+       .width = 2,
+       .clkr.hw.init = &(struct clk_init_data) {
+               .name = "disp_cc_mdss_byte0_div_clk_src",
+               .parent_data = &(const struct clk_parent_data){
+                       .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw
+               },
+               .num_parents = 1,
+               .ops = &clk_regmap_div_ops,
+       },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+       .reg = 0x2190,
+       .shift = 0,
+       .width = 2,
+       .clkr.hw.init = &(struct clk_init_data) {
+               .name = "disp_cc_mdss_dp_link_div_clk_src",
+               .parent_data = &(const struct clk_parent_data){
+                       .hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw
+               },
+               .num_parents = 1,
+               .ops = &clk_regmap_div_ops,
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+       .halt_reg = 0x202c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x202c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_intf_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+       .halt_reg = 0x2054,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2054,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_aux_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+       .halt_reg = 0x2048,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_crypto_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+       .halt_reg = 0x2040,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2040,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+       .halt_reg = 0x2044,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2044,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_intf_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+       .halt_reg = 0x204c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x204c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_pixel_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+       .halt_reg = 0x2038,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2038,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_esc0_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_esc0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+       .halt_reg = 0x200c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x200c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+       .halt_reg = 0x201c,
+       .halt_check = BRANCH_VOTED,
+       .clkr = {
+               .enable_reg = 0x201c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_lut_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+       .halt_reg = 0x4004,
+       .halt_check = BRANCH_VOTED,
+       .clkr = {
+               .enable_reg = 0x4004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+       .halt_reg = 0x2004,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_pclk0_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+       .halt_reg = 0x2014,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2014,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_rot_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+       .halt_reg = 0x400c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x400c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_rscc_ahb_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+       .halt_reg = 0x4008,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x4008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_rscc_vsync_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+       .halt_reg = 0x2024,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_vsync_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc mdss_gdsc = {
+       .gdscr = 0x3000,
+       .pd = {
+               .name = "mdss_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = HW_CTRL,
+};
+
+static struct gdsc *disp_cc_sc7180_gdscs[] = {
+       [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static struct clk_regmap *disp_cc_sc7180_clocks[] = {
+       [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+       [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
+                               &disp_cc_mdss_dp_link_div_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+       [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+       [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+       [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+       [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+       [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+       [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+       [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+       [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+       [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+       [DISP_CC_PLL0_OUT_EVEN] = &disp_cc_pll0_out_even.clkr,
+};
+
+static const struct regmap_config disp_cc_sc7180_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = 0x10000,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sc7180_desc = {
+       .config = &disp_cc_sc7180_regmap_config,
+       .clks = disp_cc_sc7180_clocks,
+       .num_clks = ARRAY_SIZE(disp_cc_sc7180_clocks),
+       .gdscs = disp_cc_sc7180_gdscs,
+       .num_gdscs = ARRAY_SIZE(disp_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id disp_cc_sc7180_match_table[] = {
+       { .compatible = "qcom,sc7180-dispcc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sc7180_match_table);
+
+static int disp_cc_sc7180_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+       struct alpha_pll_config disp_cc_pll_config = {};
+
+       regmap = qcom_cc_map(pdev, &disp_cc_sc7180_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* 1380MHz configuration */
+       disp_cc_pll_config.l = 0x47;
+       disp_cc_pll_config.alpha = 0xe000;
+       disp_cc_pll_config.user_ctl_val = 0x00000001;
+       disp_cc_pll_config.user_ctl_hi_val = 0x00004805;
+
+       clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll_config);
+
+       return qcom_cc_really_probe(pdev, &disp_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sc7180_driver = {
+       .probe = disp_cc_sc7180_probe,
+       .driver = {
+               .name = "sc7180-dispcc",
+               .of_match_table = disp_cc_sc7180_match_table,
+       },
+};
+
+static int __init disp_cc_sc7180_init(void)
+{
+       return platform_driver_register(&disp_cc_sc7180_driver);
+}
+subsys_initcall(disp_cc_sc7180_init);
+
+static void __exit disp_cc_sc7180_exit(void)
+{
+       platform_driver_unregister(&disp_cc_sc7180_driver);
+}
+module_exit(disp_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
index 0cc4909..5c932cd 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk-provider.h>
@@ -29,6 +29,8 @@ enum {
        P_DSI1_PHY_PLL_OUT_DSICLK,
        P_GPLL0_OUT_MAIN,
        P_GPLL0_OUT_MAIN_DIV,
+       P_DP_PHY_PLL_LINK_CLK,
+       P_DP_PHY_PLL_VCO_DIV_CLK,
 };
 
 static const struct parent_map disp_cc_parent_map_0[] = {
@@ -45,6 +47,20 @@ static const char * const disp_cc_parent_names_0[] = {
        "core_bi_pll_test_se",
 };
 
+static const struct parent_map disp_cc_parent_map_1[] = {
+       { P_BI_TCXO, 0 },
+       { P_DP_PHY_PLL_LINK_CLK, 1 },
+       { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+       "bi_tcxo",
+       "dp_link_clk_divsel_ten",
+       "dp_vco_divided_clk_src_mux",
+       "core_bi_pll_test_se",
+};
+
 static const struct parent_map disp_cc_parent_map_2[] = {
        { P_BI_TCXO, 0 },
        { P_CORE_BI_PLL_TEST_SE, 7 },
@@ -128,6 +144,81 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
        },
 };
 
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+       .cmd_rcgr = 0x219c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_2,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_aux_clk_src",
+               .parent_names = disp_cc_parent_names_2,
+               .num_parents = 2,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+       .cmd_rcgr = 0x2154,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_crypto_clk_src",
+               .parent_names = disp_cc_parent_names_1,
+               .num_parents = 4,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+       .cmd_rcgr = 0x2138,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_link_clk_src",
+               .parent_names = disp_cc_parent_names_1,
+               .num_parents = 4,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+       .cmd_rcgr = 0x2184,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_pixel1_clk_src",
+               .parent_names = disp_cc_parent_names_1,
+               .num_parents = 4,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_dp_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+       .cmd_rcgr = 0x216c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_pixel_clk_src",
+               .parent_names = disp_cc_parent_names_1,
+               .num_parents = 4,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_dp_ops,
+       },
+};
+
 static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
        F(19200000, P_BI_TCXO, 1, 0, 0),
        { }
@@ -391,6 +482,114 @@ static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
        },
 };
 
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+       .halt_reg = 0x2054,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2054,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_aux_clk",
+                       .parent_names = (const char *[]){
+                               "disp_cc_mdss_dp_aux_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+       .halt_reg = 0x2048,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_crypto_clk",
+                       .parent_names = (const char *[]){
+                               "disp_cc_mdss_dp_crypto_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+       .halt_reg = 0x2040,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2040,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_clk",
+                       .parent_names = (const char *[]){
+                               "disp_cc_mdss_dp_link_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+/* reset state of disp_cc_mdss_dp_link_div_clk_src divider is 0x3 (div 4) */
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+       .halt_reg = 0x2044,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2044,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_intf_clk",
+                       .parent_names = (const char *[]){
+                               "disp_cc_mdss_dp_link_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+       .halt_reg = 0x2050,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2050,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_pixel1_clk",
+                       .parent_names = (const char *[]){
+                               "disp_cc_mdss_dp_pixel1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+       .halt_reg = 0x204c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x204c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_pixel_clk",
+                       .parent_names = (const char *[]){
+                               "disp_cc_mdss_dp_pixel_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
 static struct clk_branch disp_cc_mdss_esc0_clk = {
        .halt_reg = 0x2038,
        .halt_check = BRANCH_HALT,
@@ -589,6 +788,19 @@ static struct clk_regmap *disp_cc_sdm845_clocks[] = {
        [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
        [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] =
                                        &disp_cc_mdss_byte1_div_clk_src.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] =
+                                       &disp_cc_mdss_dp_crypto_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] =
+                                       &disp_cc_mdss_dp_pixel1_clk_src.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
        [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
        [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
        [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
new file mode 100644 (file)
index 0000000..3f9c2f6
--- /dev/null
@@ -0,0 +1,4635 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gcc-ipq6018.h>
+#include <dt-bindings/reset/qcom,gcc-ipq6018.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+
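+/*
+ * Note on F(): the pre-divider is stored as (2 * h - 1); clk-rcg2 divides
+ * by (field + 1) / 2, which is how half-integer dividers such as 2.5 or
+ * 12.5 in the tables below are expressed. (Editorial comment, not part of
+ * the original submission.)
+ */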
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+enum {
+       P_XO,
+       P_BIAS_PLL,
+       P_UNIPHY0_RX,
+       P_UNIPHY0_TX,
+       P_UNIPHY1_RX,
+       P_BIAS_PLL_NSS_NOC,
+       P_UNIPHY1_TX,
+       P_PCIE20_PHY0_PIPE,
+       P_USB3PHY_0_PIPE,
+       P_GPLL0,
+       P_GPLL0_DIV2,
+       P_GPLL2,
+       P_GPLL4,
+       P_GPLL6,
+       P_SLEEP_CLK,
+       P_UBI32_PLL,
+       P_NSS_CRYPTO_PLL,
+       P_PI_SLEEP,
+};
+
+static struct clk_alpha_pll gpll0_main = {
+       .offset = 0x21000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .clkr = {
+               .enable_reg = 0x0b000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpll0_main",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_ops,
+               },
+       },
+};
+
+static struct clk_fixed_factor gpll0_out_main_div2 = {
+       .mult = 1,
+       .div = 2,
+       .hw.init = &(struct clk_init_data){
+               .name = "gpll0_out_main_div2",
+               .parent_hws = (const struct clk_hw *[]){
+                               &gpll0_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_fixed_factor_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+       .offset = 0x21000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .width = 4,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll0",
+               .parent_hws = (const struct clk_hw *[]){
+                               &gpll0_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_ro_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_out_main_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_alpha_pll ubi32_pll_main = {
+       .offset = 0x25000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA],
+       .flags = SUPPORTS_DYNAMIC_UPDATE,
+       .clkr = {
+               .enable_reg = 0x0b000,
+               .enable_mask = BIT(6),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ubi32_pll_main",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_huayra_ops,
+               },
+       },
+};
+
+static struct clk_alpha_pll_postdiv ubi32_pll = {
+       .offset = 0x25000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA],
+       .width = 2,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ubi32_pll",
+               .parent_hws = (const struct clk_hw *[]){
+                               &ubi32_pll_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_ro_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_alpha_pll gpll6_main = {
+       .offset = 0x37000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+       .clkr = {
+               .enable_reg = 0x0b000,
+               .enable_mask = BIT(7),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpll6_main",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_ops,
+               },
+       },
+};
+
+static struct clk_alpha_pll_postdiv gpll6 = {
+       .offset = 0x37000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+       .width = 2,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll6",
+               .parent_hws = (const struct clk_hw *[]){
+                               &gpll6_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_ro_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_alpha_pll gpll4_main = {
+       .offset = 0x24000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .clkr = {
+               .enable_reg = 0x0b000,
+               .enable_mask = BIT(5),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpll4_main",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_ops,
+               },
+       },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+       .offset = 0x24000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .width = 4,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll4",
+               .parent_hws = (const struct clk_hw *[]){
+                               &gpll4_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_ro_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct freq_tbl ftbl_pcnoc_bfdcd_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0, 16, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       { }
+};
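+
+/*
+ * The GPLL0-derived rates in this and the following tables correspond to
+ * GPLL0 running at 800 MHz (gpll0_out_main_div2 at 400 MHz), e.g. 100 MHz
+ * is GPLL0 / 8 and 50 MHz is GPLL0 / 16. (Editorial comment inferred from
+ * the tables themselves.)
+ */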
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+       .cmd_rcgr = 0x27000,
+       .freq_tbl = ftbl_pcnoc_bfdcd_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcnoc_bfdcd_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_alpha_pll gpll2_main = {
+       .offset = 0x4a000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .clkr = {
+               .enable_reg = 0x0b000,
+               .enable_mask = BIT(2),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpll2_main",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_ops,
+               },
+       },
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+       .offset = 0x4a000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .width = 4,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll2",
+               .parent_hws = (const struct clk_hw *[]){
+                               &gpll2_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_ro_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_alpha_pll nss_crypto_pll_main = {
+       .offset = 0x22000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .clkr = {
+               .enable_reg = 0x0b000,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_crypto_pll_main",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_ops,
+               },
+       },
+};
+
+static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+       .offset = 0x22000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .width = 4,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_crypto_pll",
+               .parent_hws = (const struct clk_hw *[]){
+                               &nss_crypto_pll_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_ro_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct freq_tbl ftbl_qdss_tsctr_clk_src[] = {
+       F(160000000, P_GPLL0_DIV2, 2.5, 0, 0),
+       F(320000000, P_GPLL0, 2.5, 0, 0),
+       F(600000000, P_GPLL4, 2, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll4_gpll0_gpll6_gpll0_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll4.clkr.hw },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll6.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL4, 1 },
+       { P_GPLL0, 2 },
+       { P_GPLL6, 3 },
+       { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 qdss_tsctr_clk_src = {
+       .cmd_rcgr = 0x29064,
+       .freq_tbl = ftbl_qdss_tsctr_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "qdss_tsctr_clk_src",
+               .parent_data = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_fixed_factor qdss_dap_sync_clk_src = {
+       .mult = 1,
+       .div = 4,
+       .hw.init = &(struct clk_init_data){
+               .name = "qdss_dap_sync_clk_src",
+               .parent_hws = (const struct clk_hw *[]){
+                               &qdss_tsctr_clk_src.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_fixed_factor_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_qdss_at_clk_src[] = {
+       F(66670000, P_GPLL0_DIV2, 6, 0, 0),
+       F(240000000, P_GPLL4, 5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 qdss_at_clk_src = {
+       .cmd_rcgr = 0x2900c,
+       .freq_tbl = ftbl_qdss_at_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "qdss_at_clk_src",
+               .parent_data = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
+       .mult = 1,
+       .div = 2,
+       .hw.init = &(struct clk_init_data){
+               .name = "qdss_tsctr_div2_clk_src",
+               .parent_hws = (const struct clk_hw *[]){
+                               &qdss_tsctr_clk_src.clkr.hw },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_fixed_factor_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_nss_ppe_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(300000000, P_BIAS_PLL, 1, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_bias_gpll0_gpll4_nss_ubi32[] = {
+       { .fw_name = "xo" },
+       { .fw_name = "bias_pll_cc_clk" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll4.clkr.hw },
+       { .hw = &nss_crypto_pll.clkr.hw },
+       { .hw = &ubi32_pll.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_bias_gpll0_gpll4_nss_ubi32_map[] = {
+       { P_XO, 0 },
+       { P_BIAS_PLL, 1 },
+       { P_GPLL0, 2 },
+       { P_GPLL4, 3 },
+       { P_NSS_CRYPTO_PLL, 4 },
+       { P_UBI32_PLL, 5 },
+};
+
+static struct clk_rcg2 nss_ppe_clk_src = {
+       .cmd_rcgr = 0x68080,
+       .freq_tbl = ftbl_nss_ppe_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_bias_gpll0_gpll4_nss_ubi32_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_ppe_clk_src",
+               .parent_data = gcc_xo_bias_gpll0_gpll4_nss_ubi32,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_xo_clk_src = {
+       .halt_reg = 0x30018,
+       .clkr = {
+               .enable_reg = 0x30018,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_xo_clk_src",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static const struct freq_tbl ftbl_nss_ce_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+};
+
+static struct clk_rcg2 nss_ce_clk_src = {
+       .cmd_rcgr = 0x68098,
+       .freq_tbl = ftbl_nss_ce_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_ce_clk_src",
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_sleep_clk_src = {
+       .halt_reg = 0x30000,
+       .clkr = {
+               .enable_reg = 0x30000,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sleep_clk_src",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "sleep_clk",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static const struct freq_tbl ftbl_snoc_nssnoc_bfdcd_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       F(133333333, P_GPLL0, 6, 0, 0),
+       F(160000000, P_GPLL0, 5, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       F(266666667, P_GPLL0, 3, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data
+                       gcc_xo_gpll0_gpll6_gpll0_out_main_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll6.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL6, 2 },
+       { P_GPLL0_DIV2, 3 },
+};
+
+static struct clk_rcg2 snoc_nssnoc_bfdcd_clk_src = {
+       .cmd_rcgr = 0x76054,
+       .freq_tbl = ftbl_snoc_nssnoc_bfdcd_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "snoc_nssnoc_bfdcd_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll6_gpll0_out_main_div2,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+       F(50000000, P_GPLL0, 16, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+       .cmd_rcgr = 0x46000,
+       .freq_tbl = ftbl_apss_ahb_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "apss_ahb_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+       F(25000000, P_UNIPHY0_RX, 5, 0, 0),
+       F(78125000, P_UNIPHY1_RX, 4, 0, 0),
+       F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+       F(125000000, P_UNIPHY0_RX, 1, 0, 0),
+       F(156250000, P_UNIPHY1_RX, 2, 0, 0),
+       F(312500000, P_UNIPHY1_RX, 1, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data
+gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias[] = {
+       { .fw_name = "xo" },
+       { .fw_name = "uniphy0_gcc_rx_clk" },
+       { .fw_name = "uniphy0_gcc_tx_clk" },
+       { .fw_name = "uniphy1_gcc_rx_clk" },
+       { .fw_name = "uniphy1_gcc_tx_clk" },
+       { .hw = &ubi32_pll.clkr.hw },
+       { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map
+gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
+       { P_XO, 0 },
+       { P_UNIPHY0_RX, 1 },
+       { P_UNIPHY0_TX, 2 },
+       { P_UNIPHY1_RX, 3 },
+       { P_UNIPHY1_TX, 4 },
+       { P_UBI32_PLL, 5 },
+       { P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port5_rx_clk_src = {
+       .cmd_rcgr = 0x68060,
+       .freq_tbl = ftbl_nss_port5_rx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port5_rx_clk_src",
+               .parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+       F(25000000, P_UNIPHY0_TX, 5, 0, 0),
+       F(78125000, P_UNIPHY1_TX, 4, 0, 0),
+       F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+       F(125000000, P_UNIPHY0_TX, 1, 0, 0),
+       F(156250000, P_UNIPHY1_TX, 2, 0, 0),
+       F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data
+gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias[] = {
+       { .fw_name = "xo" },
+       { .fw_name = "uniphy0_gcc_tx_clk" },
+       { .fw_name = "uniphy0_gcc_rx_clk" },
+       { .fw_name = "uniphy1_gcc_tx_clk" },
+       { .fw_name = "uniphy1_gcc_rx_clk" },
+       { .hw = &ubi32_pll.clkr.hw },
+       { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map
+gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
+       { P_XO, 0 },
+       { P_UNIPHY0_TX, 1 },
+       { P_UNIPHY0_RX, 2 },
+       { P_UNIPHY1_TX, 3 },
+       { P_UNIPHY1_RX, 4 },
+       { P_UBI32_PLL, 5 },
+       { P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port5_tx_clk_src = {
+       .cmd_rcgr = 0x68068,
+       .freq_tbl = ftbl_nss_port5_tx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port5_tx_clk_src",
+               .parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_pcie_axi_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       F(240000000, P_GPLL4, 5, 0, 0),
+       { }
+};
+
+static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL4, 2 },
+};
+
+static struct clk_rcg2 pcie0_axi_clk_src = {
+       .cmd_rcgr = 0x75054,
+       .freq_tbl = ftbl_pcie_axi_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll4_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcie0_axi_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll4,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_usb0_master_clk_src[] = {
+       F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       F(133330000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_out_main_div2_gpll0[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0_out_main_div2.hw },
+       { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_out_main_div2_gpll0_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0_DIV2, 2 },
+       { P_GPLL0, 1 },
+};
+
+static struct clk_rcg2 usb0_master_clk_src = {
+       .cmd_rcgr = 0x3e00c,
+       .freq_tbl = ftbl_usb0_master_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb0_master_clk_src",
+               .parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_regmap_div apss_ahb_postdiv_clk_src = {
+       .reg = 0x46018,
+       .shift = 4,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "apss_ahb_postdiv_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &apss_ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+               },
+       },
+};
+
+static struct clk_fixed_factor gcc_xo_div4_clk_src = {
+       .mult = 1,
+       .div = 4,
+       .hw.init = &(struct clk_init_data){
+               .name = "gcc_xo_div4_clk_src",
+               .parent_hws = (const struct clk_hw *[]){
+                               &gcc_xo_clk_src.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_fixed_factor_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct freq_tbl ftbl_nss_port1_rx_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(25000000, P_UNIPHY0_RX, 5, 0, 0),
+       F(125000000, P_UNIPHY0_RX, 1, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_uniphy0_rx_tx_ubi32_bias[] = {
+       { .fw_name = "xo" },
+       { .fw_name = "uniphy0_gcc_rx_clk" },
+       { .fw_name = "uniphy0_gcc_tx_clk" },
+       { .hw = &ubi32_pll.clkr.hw },
+       { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map gcc_xo_uniphy0_rx_tx_ubi32_bias_map[] = {
+       { P_XO, 0 },
+       { P_UNIPHY0_RX, 1 },
+       { P_UNIPHY0_TX, 2 },
+       { P_UBI32_PLL, 5 },
+       { P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port1_rx_clk_src = {
+       .cmd_rcgr = 0x68020,
+       .freq_tbl = ftbl_nss_port1_rx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port1_rx_clk_src",
+               .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_nss_port1_tx_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(25000000, P_UNIPHY0_TX, 5, 0, 0),
+       F(125000000, P_UNIPHY0_TX, 1, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_uniphy0_tx_rx_ubi32_bias[] = {
+       { .fw_name = "xo" },
+       { .fw_name = "uniphy0_gcc_tx_clk" },
+       { .fw_name = "uniphy0_gcc_rx_clk" },
+       { .hw = &ubi32_pll.clkr.hw },
+       { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map gcc_xo_uniphy0_tx_rx_ubi32_bias_map[] = {
+       { P_XO, 0 },
+       { P_UNIPHY0_TX, 1 },
+       { P_UNIPHY0_RX, 2 },
+       { P_UBI32_PLL, 5 },
+       { P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port1_tx_clk_src = {
+       .cmd_rcgr = 0x68028,
+       .freq_tbl = ftbl_nss_port1_tx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port1_tx_clk_src",
+               .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 nss_port2_rx_clk_src = {
+       .cmd_rcgr = 0x68030,
+       .freq_tbl = ftbl_nss_port1_rx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port2_rx_clk_src",
+               .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 nss_port2_tx_clk_src = {
+       .cmd_rcgr = 0x68038,
+       .freq_tbl = ftbl_nss_port1_tx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port2_tx_clk_src",
+               .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 nss_port3_rx_clk_src = {
+       .cmd_rcgr = 0x68040,
+       .freq_tbl = ftbl_nss_port1_rx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port3_rx_clk_src",
+               .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 nss_port3_tx_clk_src = {
+       .cmd_rcgr = 0x68048,
+       .freq_tbl = ftbl_nss_port1_tx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port3_tx_clk_src",
+               .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 nss_port4_rx_clk_src = {
+       .cmd_rcgr = 0x68050,
+       .freq_tbl = ftbl_nss_port1_rx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port4_rx_clk_src",
+               .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 nss_port4_tx_clk_src = {
+       .cmd_rcgr = 0x68058,
+       .freq_tbl = ftbl_nss_port1_tx_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_port4_tx_clk_src",
+               .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_regmap_div nss_port5_rx_div_clk_src = {
+       .reg = 0x68440,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port5_rx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_rx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port5_tx_div_clk_src = {
+       .reg = 0x68444,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port5_tx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_tx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl ftbl_apss_axi_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       F(308570000, P_GPLL6, 3.5, 0, 0),
+       F(400000000, P_GPLL0, 2, 0, 0),
+       F(533000000, P_GPLL0, 1.5, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_ubi32_gpll0_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll6.clkr.hw },
+       { .hw = &ubi32_pll.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map
+gcc_xo_gpll0_gpll6_ubi32_gpll0_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL6, 2 },
+       { P_UBI32_PLL, 3 },
+       { P_GPLL0_DIV2, 6 },
+};
+
+static struct clk_rcg2 apss_axi_clk_src = {
+       .cmd_rcgr = 0x38048,
+       .freq_tbl = ftbl_apss_axi_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll6_ubi32_gpll0_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "apss_axi_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll6_ubi32_gpll0_div2,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_nss_crypto_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(300000000, P_NSS_CRYPTO_PLL, 2, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_nss_crypto_pll_gpll0[] = {
+       { .fw_name = "xo" },
+       { .hw = &nss_crypto_pll.clkr.hw },
+       { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_nss_crypto_pll_gpll0_map[] = {
+       { P_XO, 0 },
+       { P_NSS_CRYPTO_PLL, 1 },
+       { P_GPLL0, 2 },
+};
+
+static struct clk_rcg2 nss_crypto_clk_src = {
+       .cmd_rcgr = 0x68144,
+       .freq_tbl = ftbl_nss_crypto_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_nss_crypto_pll_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_crypto_clk_src",
+               .parent_data = gcc_xo_nss_crypto_pll_gpll0,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_regmap_div nss_port1_rx_div_clk_src = {
+       .reg = 0x68400,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port1_rx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                               &nss_port1_rx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port1_tx_div_clk_src = {
+       .reg = 0x68404,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port1_tx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port1_tx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port2_rx_div_clk_src = {
+       .reg = 0x68410,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port2_rx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port2_rx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port2_tx_div_clk_src = {
+       .reg = 0x68414,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port2_tx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port2_tx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port3_rx_div_clk_src = {
+       .reg = 0x68420,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port3_rx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port3_rx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port3_tx_div_clk_src = {
+       .reg = 0x68424,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port3_tx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port3_tx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port4_rx_div_clk_src = {
+       .reg = 0x68430,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port4_rx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port4_rx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div nss_port4_tx_div_clk_src = {
+       .reg = 0x68434,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_port4_tx_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port4_tx_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl ftbl_nss_ubi_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(149760000, P_UBI32_PLL, 10, 0, 0),
+       F(187200000, P_UBI32_PLL, 8, 0, 0),
+       F(249600000, P_UBI32_PLL, 6, 0, 0),
+       F(374400000, P_UBI32_PLL, 4, 0, 0),
+       F(748800000, P_UBI32_PLL, 2, 0, 0),
+       F(1497600000, P_UBI32_PLL, 1, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data
+                       gcc_xo_ubi32_pll_gpll0_gpll2_gpll4_gpll6[] = {
+       { .fw_name = "xo" },
+       { .hw = &ubi32_pll.clkr.hw },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll2.clkr.hw },
+       { .hw = &gpll4.clkr.hw },
+       { .hw = &gpll6.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_ubi32_gpll0_gpll2_gpll4_gpll6_map[] = {
+       { P_XO, 0 },
+       { P_UBI32_PLL, 1 },
+       { P_GPLL0, 2 },
+       { P_GPLL2, 3 },
+       { P_GPLL4, 4 },
+       { P_GPLL6, 5 },
+};
+
+static struct clk_rcg2 nss_ubi0_clk_src = {
+       .cmd_rcgr = 0x68104,
+       .freq_tbl = ftbl_nss_ubi_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_ubi32_gpll0_gpll2_gpll4_gpll6_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "nss_ubi0_clk_src",
+               .parent_data = gcc_xo_ubi32_pll_gpll0_gpll2_gpll4_gpll6,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct freq_tbl ftbl_adss_pwm_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 adss_pwm_clk_src = {
+       .cmd_rcgr = 0x1c008,
+       .freq_tbl = ftbl_adss_pwm_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "adss_pwm_clk_src",
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_i2c_apps_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+       F(50000000, P_GPLL0, 16, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0200c,
+       .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup1_i2c_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src[] = {
+       F(960000, P_XO, 10, 2, 5),
+       F(4800000, P_XO, 5, 0, 0),
+       F(9600000, P_XO, 2, 4, 5),
+       F(12500000, P_GPLL0_DIV2, 16, 1, 2),
+       F(16000000, P_GPLL0, 10, 1, 5),
+       F(24000000, P_XO, 1, 0, 0),
+       F(25000000, P_GPLL0, 16, 1, 2),
+       F(50000000, P_GPLL0, 16, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+       .cmd_rcgr = 0x02024,
+       .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup1_spi_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x03000,
+       .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup2_i2c_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+       .cmd_rcgr = 0x03014,
+       .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup2_spi_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x04000,
+       .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup3_i2c_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+       .cmd_rcgr = 0x04014,
+       .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup3_spi_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x05000,
+       .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup4_i2c_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+       .cmd_rcgr = 0x05014,
+       .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup4_spi_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x06000,
+       .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup5_i2c_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+       .cmd_rcgr = 0x06014,
+       .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup5_spi_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x07000,
+       .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup6_i2c_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+       .cmd_rcgr = 0x07014,
+       .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup6_spi_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart_apps_clk_src[] = {
+       F(3686400, P_GPLL0_DIV2, 1, 144, 15625),
+       F(7372800, P_GPLL0_DIV2, 1, 288, 15625),
+       F(14745600, P_GPLL0_DIV2, 1, 576, 15625),
+       F(16000000, P_GPLL0_DIV2, 5, 1, 5),
+       F(24000000, P_XO, 1, 0, 0),
+       F(24000000, P_GPLL0, 1, 3, 100),
+       F(25000000, P_GPLL0, 16, 1, 2),
+       F(32000000, P_GPLL0, 1, 1, 25),
+       F(40000000, P_GPLL0, 1, 1, 20),
+       F(46400000, P_GPLL0, 1, 29, 500),
+       F(48000000, P_GPLL0, 1, 3, 50),
+       F(51200000, P_GPLL0, 1, 8, 125),
+       F(56000000, P_GPLL0, 1, 7, 100),
+       F(58982400, P_GPLL0, 1, 1152, 15625),
+       F(60000000, P_GPLL0, 1, 3, 40),
+       F(64000000, P_GPLL0, 12.5, 1, 1),
+       { }
+};
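+
+/*
+ * Worked example (editorial): with gpll0_out_main_div2 at 400 MHz, the
+ * entry F(3686400, P_GPLL0_DIV2, 1, 144, 15625) programs M/N = 144/15625
+ * and yields 400 MHz * 144 / 15625 = 3.6864 MHz, the usual UART baud
+ * reference clock.
+ */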
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+       .cmd_rcgr = 0x02044,
+       .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart1_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+       .cmd_rcgr = 0x03034,
+       .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart2_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+       .cmd_rcgr = 0x04034,
+       .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart3_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+       .cmd_rcgr = 0x05034,
+       .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart4_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+       .cmd_rcgr = 0x06034,
+       .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart5_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+       .cmd_rcgr = 0x07034,
+       .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart6_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+       F(40000000, P_GPLL0_DIV2, 10, 0, 0),
+       F(80000000, P_GPLL0, 10, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       F(160000000, P_GPLL0, 5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+       .cmd_rcgr = 0x16004,
+       .freq_tbl = ftbl_crypto_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "crypto_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gp_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       F(266666666, P_GPLL0, 3, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_sleep_clk[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll6.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+       { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL6, 2 },
+       { P_GPLL0_DIV2, 4 },
+       { P_SLEEP_CLK, 6 },
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+       .cmd_rcgr = 0x08004,
+       .freq_tbl = ftbl_gp_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gp1_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+       .cmd_rcgr = 0x09004,
+       .freq_tbl = ftbl_gp_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gp2_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+       .cmd_rcgr = 0x0a004,
+       .freq_tbl = ftbl_gp_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gp3_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_fixed_factor nss_ppe_cdiv_clk_src = {
+       .mult = 1,
+       .div = 4,
+       .hw.init = &(struct clk_init_data){
+               .name = "nss_ppe_cdiv_clk_src",
+               .parent_hws = (const struct clk_hw *[]){
+                               &nss_ppe_clk_src.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_fixed_factor_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_regmap_div nss_ubi0_div_clk_src = {
+       .reg = 0x68118,
+       .shift = 0,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_ubi0_div_clk_src",
+                       .parent_hws = (const struct clk_hw *[]){
+                               &nss_ubi0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ro_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
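+
+/*
+ * clk_regmap_div_ro_ops registers this divider read-only: the value
+ * already present in the register (presumably programmed by earlier
+ * boot stages) is reported to the clock framework but never rewritten.
+ */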
+
+static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_core_pi_sleep_clk_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 2 },
+       { P_PI_SLEEP, 6 },
+};
+
+static struct clk_rcg2 pcie0_aux_clk_src = {
+       .cmd_rcgr = 0x75024,
+       .freq_tbl = ftbl_pcie_aux_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcie0_aux_clk_src",
+               .parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct clk_parent_data gcc_pcie20_phy0_pipe_clk_xo[] = {
+       { .fw_name = "pcie20_phy0_pipe_clk" },
+       { .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_pcie20_phy0_pipe_clk_xo_map[] = {
+       { P_PCIE20_PHY0_PIPE, 0 },
+       { P_XO, 2 },
+};
+
+static struct clk_regmap_mux pcie0_pipe_clk_src = {
+       .reg = 0x7501c,
+       .shift = 8,
+       .width = 2,
+       .parent_map = gcc_pcie20_phy0_pipe_clk_xo_map,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie0_pipe_clk_src",
+                       .parent_data = gcc_pcie20_phy0_pipe_clk_xo,
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
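+
+/*
+ * The pipe clock mux selects between the PHY-supplied pipe clock and XO.
+ * The .fw_name parents are resolved against the controller's
+ * "clock-names" in DT, and clk_regmap_mux_closest_ops picks whichever
+ * parent gets closest to the requested rate.
+ */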
+
+static const struct freq_tbl ftbl_sdcc_apps_clk_src[] = {
+       F(144000, P_XO, 16, 12, 125),
+       F(400000, P_XO, 12, 1, 5),
+       F(24000000, P_GPLL2, 12, 1, 4),
+       F(48000000, P_GPLL2, 12, 1, 2),
+       F(96000000, P_GPLL2, 12, 0, 0),
+       F(177777778, P_GPLL0, 4.5, 0, 0),
+       F(192000000, P_GPLL2, 6, 0, 0),
+       F(384000000, P_GPLL2, 3, 0, 0),
+       { }
+};
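+
+/*
+ * The F(freq, src, div, m, n) helper from clk-rcg.h stores the
+ * pre-divider as (2 * div - 1), so half-integer dividers such as the
+ * 4.5 above encode as an odd integer register value; m/n feed the M:N
+ * counter when mnd_width is non-zero.
+ */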
+
+static const struct clk_parent_data
+                       gcc_xo_gpll0_gpll2_gpll0_out_main_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll2.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL2, 2 },
+       { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+       .cmd_rcgr = 0x42004,
+       .freq_tbl = ftbl_sdcc_apps_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sdcc1_apps_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_usb_aux_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 usb0_aux_clk_src = {
+       .cmd_rcgr = 0x3e05c,
+       .freq_tbl = ftbl_usb_aux_clk_src,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb0_aux_clk_src",
+               .parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_usb_mock_utmi_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(60000000, P_GPLL6, 6, 1, 3),
+       { }
+};
+
+static const struct clk_parent_data
+                       gcc_xo_gpll6_gpll0_gpll0_out_main_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll6.clkr.hw },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL6, 1 },
+       { P_GPLL0, 3 },
+       { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 usb0_mock_utmi_clk_src = {
+       .cmd_rcgr = 0x3e020,
+       .freq_tbl = ftbl_usb_mock_utmi_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb0_mock_utmi_clk_src",
+               .parent_data = gcc_xo_gpll6_gpll0_gpll0_out_main_div2,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct clk_parent_data gcc_usb3phy_0_cc_pipe_clk_xo[] = {
+       { .fw_name = "usb3phy_0_cc_pipe_clk" },
+       { .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_usb3phy_0_cc_pipe_clk_xo_map[] = {
+       { P_USB3PHY_0_PIPE, 0 },
+       { P_XO, 2 },
+};
+
+static struct clk_regmap_mux usb0_pipe_clk_src = {
+       .reg = 0x3e048,
+       .shift = 8,
+       .width = 2,
+       .parent_map = gcc_usb3phy_0_cc_pipe_clk_xo_map,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb0_pipe_clk_src",
+                       .parent_data = gcc_usb3phy_0_cc_pipe_clk_xo,
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+       F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+       F(160000000, P_GPLL0, 5, 0, 0),
+       F(216000000, P_GPLL6, 5, 0, 0),
+       F(308570000, P_GPLL6, 3.5, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll6.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL6, 2 },
+       { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+       .cmd_rcgr = 0x5d000,
+       .freq_tbl = ftbl_sdcc_ice_core_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll6_gpll0_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sdcc1_ice_core_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll6_gpll0_div2,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_qdss_stm_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 qdss_stm_clk_src = {
+       .cmd_rcgr = 0x2902C,
+       .freq_tbl = ftbl_qdss_stm_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "qdss_stm_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_qdss_traceclkin_clk_src[] = {
+       F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+       F(160000000, P_GPLL0, 5, 0, 0),
+       F(300000000, P_GPLL4, 4, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll4_gpll0_gpll0_div2[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll4.clkr.hw },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll0_div2_map[] = {
+       { P_XO, 0 },
+       { P_GPLL4, 1 },
+       { P_GPLL0, 2 },
+       { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 qdss_traceclkin_clk_src = {
+       .cmd_rcgr = 0x29048,
+       .freq_tbl = ftbl_qdss_traceclkin_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "qdss_traceclkin_clk_src",
+               .parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 usb1_mock_utmi_clk_src = {
+       .cmd_rcgr = 0x3f020,
+       .freq_tbl = ftbl_usb_mock_utmi_clk_src,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb1_mock_utmi_clk_src",
+               .parent_data = gcc_xo_gpll6_gpll0_gpll0_out_main_div2,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_adss_pwm_clk = {
+       .halt_reg = 0x1c020,
+       .clkr = {
+               .enable_reg = 0x1c020,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_adss_pwm_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &adss_pwm_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
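+
+/*
+ * Branch clocks are simple gates: enable_reg/enable_mask toggles the
+ * clock and halt_reg is polled by clk_branch2_ops to confirm the branch
+ * actually started (or stopped). CLK_SET_RATE_PARENT forwards rate
+ * requests to the RCG feeding the branch.
+ */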
+
+static struct clk_branch gcc_apss_ahb_clk = {
+       .halt_reg = 0x4601c,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x0b004,
+               .enable_mask = BIT(14),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_apss_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &apss_ahb_postdiv_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
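+
+/*
+ * BRANCH_HALT_VOTED branches are enabled through the shared vote
+ * register at 0x0b004; disabling only drops this master's vote, so the
+ * halt check is relaxed because another voter may keep the clock on.
+ */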
+
+static const struct freq_tbl ftbl_system_noc_bfdcd_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       F(133333333, P_GPLL0, 6, 0, 0),
+       F(160000000, P_GPLL0, 5, 0, 0),
+       F(200000000, P_GPLL0, 4, 0, 0),
+       F(266666667, P_GPLL0, 3, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+       .cmd_rcgr = 0x26004,
+       .freq_tbl = ftbl_system_noc_bfdcd_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "system_noc_bfdcd_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll6_gpll0_out_main_div2,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_ubi32_mem_noc_bfdcd_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(307670000, P_BIAS_PLL_NSS_NOC, 1.5, 0, 0),
+       F(533333333, P_GPLL0, 1.5, 0, 0),
+       { }
+};
+
+static const struct clk_parent_data
+                       gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll2.clkr.hw },
+       { .fw_name = "bias_pll_nss_noc_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL2, 3 },
+       { P_BIAS_PLL_NSS_NOC, 4 },
+};
+
+static struct clk_rcg2 ubi32_mem_noc_bfdcd_clk_src = {
+       .cmd_rcgr = 0x68088,
+       .freq_tbl = ftbl_ubi32_mem_noc_bfdcd_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ubi32_mem_noc_bfdcd_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_apss_axi_clk = {
+       .halt_reg = 0x46020,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x0b004,
+               .enable_mask = BIT(13),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_apss_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &apss_axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+       .halt_reg = 0x01008,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x0b004,
+               .enable_mask = BIT(10),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+       .halt_reg = 0x02008,
+       .clkr = {
+               .enable_reg = 0x02008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup1_i2c_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup1_i2c_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+       .halt_reg = 0x02004,
+       .clkr = {
+               .enable_reg = 0x02004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup1_spi_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup1_spi_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+       .halt_reg = 0x03010,
+       .clkr = {
+               .enable_reg = 0x03010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup2_i2c_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup2_i2c_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+       .halt_reg = 0x0300c,
+       .clkr = {
+               .enable_reg = 0x0300c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup2_spi_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup2_spi_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+       .halt_reg = 0x04010,
+       .clkr = {
+               .enable_reg = 0x04010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup3_i2c_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup3_i2c_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+       .halt_reg = 0x0400c,
+       .clkr = {
+               .enable_reg = 0x0400c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup3_spi_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup3_spi_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+       .halt_reg = 0x05010,
+       .clkr = {
+               .enable_reg = 0x05010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup4_i2c_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup4_i2c_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+       .halt_reg = 0x0500c,
+       .clkr = {
+               .enable_reg = 0x0500c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup4_spi_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup4_spi_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+       .halt_reg = 0x06010,
+       .clkr = {
+               .enable_reg = 0x06010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup5_i2c_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup5_i2c_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+       .halt_reg = 0x0600c,
+       .clkr = {
+               .enable_reg = 0x0600c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup5_spi_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup5_spi_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+       .halt_reg = 0x0700c,
+       .clkr = {
+               .enable_reg = 0x0700c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup6_spi_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_qup6_spi_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+       .halt_reg = 0x0203c,
+       .clkr = {
+               .enable_reg = 0x0203c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart1_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_uart1_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+       .halt_reg = 0x0302c,
+       .clkr = {
+               .enable_reg = 0x0302c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart2_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_uart2_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+       .halt_reg = 0x0402c,
+       .clkr = {
+               .enable_reg = 0x0402c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart3_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_uart3_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+       .halt_reg = 0x0502c,
+       .clkr = {
+               .enable_reg = 0x0502c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart4_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_uart4_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+       .halt_reg = 0x0602c,
+       .clkr = {
+               .enable_reg = 0x0602c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart5_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_uart5_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+       .halt_reg = 0x0702c,
+       .clkr = {
+               .enable_reg = 0x0702c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart6_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &blsp1_uart6_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+       .halt_reg = 0x16024,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x0b004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_crypto_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+       .halt_reg = 0x16020,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x0b004,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_crypto_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+       .halt_reg = 0x1601c,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x0b004,
+               .enable_mask = BIT(2),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_crypto_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &crypto_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_fixed_factor gpll6_out_main_div2 = {
+       .mult = 1,
+       .div = 2,
+       .hw.init = &(struct clk_init_data){
+               .name = "gpll6_out_main_div2",
+               .parent_hws = (const struct clk_hw *[]){
+                               &gpll6_main.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_fixed_factor_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
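+
+/*
+ * Fixed-factor clock modelling the fixed /2 post-divider on the GPLL6
+ * main output; with CLK_SET_RATE_PARENT any rate request is simply
+ * scaled and forwarded to the PLL.
+ */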
+
+static struct clk_branch gcc_xo_clk = {
+       .halt_reg = 0x30030,
+       .clkr = {
+               .enable_reg = 0x30030,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_xo_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_xo_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+       .halt_reg = 0x08000,
+       .clkr = {
+               .enable_reg = 0x08000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_gp1_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gp1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+       .halt_reg = 0x09000,
+       .clkr = {
+               .enable_reg = 0x09000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_gp2_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gp2_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+       .halt_reg = 0x0a000,
+       .clkr = {
+               .enable_reg = 0x0a000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_gp3_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gp3_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_mdio_ahb_clk = {
+       .halt_reg = 0x58004,
+       .clkr = {
+               .enable_reg = 0x58004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_mdio_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_crypto_ppe_clk = {
+       .halt_reg = 0x68310,
+       .clkr = {
+               .enable_reg = 0x68310,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_crypto_ppe_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_ce_apb_clk = {
+       .halt_reg = 0x68174,
+       .clkr = {
+               .enable_reg = 0x68174,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_ce_apb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ce_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_ce_axi_clk = {
+       .halt_reg = 0x68170,
+       .clkr = {
+               .enable_reg = 0x68170,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_ce_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ce_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_cfg_clk = {
+       .halt_reg = 0x68160,
+       .clkr = {
+               .enable_reg = 0x68160,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_cfg_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_crypto_clk = {
+       .halt_reg = 0x68164,
+       .clkr = {
+               .enable_reg = 0x68164,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_crypto_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_crypto_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_csr_clk = {
+       .halt_reg = 0x68318,
+       .clkr = {
+               .enable_reg = 0x68318,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_csr_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ce_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_edma_cfg_clk = {
+       .halt_reg = 0x6819C,
+       .clkr = {
+               .enable_reg = 0x6819C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_edma_cfg_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_edma_clk = {
+       .halt_reg = 0x68198,
+       .clkr = {
+               .enable_reg = 0x68198,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_edma_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_noc_clk = {
+       .halt_reg = 0x68168,
+       .clkr = {
+               .enable_reg = 0x68168,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_noc_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ubi0_utcm_clk = {
+       .halt_reg = 0x2606c,
+       .clkr = {
+               .enable_reg = 0x2606c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ubi0_utcm_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_snoc_nssnoc_clk = {
+       .halt_reg = 0x26070,
+       .clkr = {
+               .enable_reg = 0x26070,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_snoc_nssnoc_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static const struct freq_tbl ftbl_wcss_ahb_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(133333333, P_GPLL0, 6, 0, 0),
+       { }
+};
+
+static const struct freq_tbl ftbl_q6_axi_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(400000000, P_GPLL0, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 wcss_ahb_clk_src = {
+       .cmd_rcgr = 0x59020,
+       .freq_tbl = ftbl_wcss_ahb_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "wcss_ahb_clk_src",
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_gpll4_gpll6[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll2.clkr.hw },
+       { .hw = &gpll4.clkr.hw },
+       { .hw = &gpll6.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll4_gpll6_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL2, 2 },
+       { P_GPLL4, 3 },
+       { P_GPLL6, 4 },
+};
+
+static struct clk_rcg2 q6_axi_clk_src = {
+       .cmd_rcgr = 0x59120,
+       .freq_tbl = ftbl_q6_axi_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll2_gpll4_gpll6_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "q6_axi_clk_src",
+               .parent_data = gcc_xo_gpll0_gpll2_gpll4_gpll6,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_lpass_core_axim_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 lpass_core_axim_clk_src = {
+       .cmd_rcgr = 0x1F020,
+       .freq_tbl = ftbl_lpass_core_axim_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "lpass_core_axim_clk_src",
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_lpass_snoc_cfg_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(266666667, P_GPLL0, 3, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 lpass_snoc_cfg_clk_src = {
+       .cmd_rcgr = 0x1F040,
+       .freq_tbl = ftbl_lpass_snoc_cfg_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "lpass_snoc_cfg_clk_src",
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_lpass_q6_axim_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(400000000, P_GPLL0, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 lpass_q6_axim_clk_src = {
+       .cmd_rcgr = 0x1F008,
+       .freq_tbl = ftbl_lpass_q6_axim_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "lpass_q6_axim_clk_src",
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0, 16, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 rbcpr_wcss_clk_src = {
+       .cmd_rcgr = 0x3a00c,
+       .freq_tbl = ftbl_rbcpr_wcss_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "rbcpr_wcss_clk_src",
+               .parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_lpass_core_axim_clk = {
+       .halt_reg = 0x1F028,
+       .clkr = {
+               .enable_reg = 0x1F028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_core_axim_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &lpass_core_axim_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_lpass_snoc_cfg_clk = {
+       .halt_reg = 0x1F048,
+       .clkr = {
+               .enable_reg = 0x1F048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_snoc_cfg_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &lpass_snoc_cfg_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_lpass_q6_axim_clk = {
+       .halt_reg = 0x1F010,
+       .clkr = {
+               .enable_reg = 0x1F010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_q6_axim_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &lpass_q6_axim_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_lpass_q6_atbm_at_clk = {
+       .halt_reg = 0x1F018,
+       .clkr = {
+               .enable_reg = 0x1F018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_q6_atbm_at_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &qdss_at_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_lpass_q6_pclkdbg_clk = {
+       .halt_reg = 0x1F01C,
+       .clkr = {
+               .enable_reg = 0x1F01C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_q6_pclkdbg_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &qdss_dap_sync_clk_src.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_lpass_q6ss_tsctr_1to2_clk = {
+       .halt_reg = 0x1F014,
+       .clkr = {
+               .enable_reg = 0x1F014,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_q6ss_tsctr_1to2_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &qdss_tsctr_div2_clk_src.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_lpass_q6ss_trig_clk = {
+       .halt_reg = 0x1F038,
+       .clkr = {
+               .enable_reg = 0x1F038,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_q6ss_trig_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &qdss_dap_sync_clk_src.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_lpass_tbu_clk = {
+       .halt_reg = 0x12094,
+       .clkr = {
+               .enable_reg = 0xb00c,
+               .enable_mask = BIT(10),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_tbu_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &lpass_q6_axim_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcnoc_lpass_clk = {
+       .halt_reg = 0x27020,
+       .clkr = {
+               .enable_reg = 0x27020,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcnoc_lpass_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &lpass_core_axim_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_mem_noc_lpass_clk = {
+       .halt_reg = 0x1D044,
+       .clkr = {
+               .enable_reg = 0x1D044,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_mem_noc_lpass_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &lpass_q6_axim_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_snoc_lpass_cfg_clk = {
+       .halt_reg = 0x26074,
+       .clkr = {
+               .enable_reg = 0x26074,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_snoc_lpass_cfg_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &lpass_snoc_cfg_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_mem_noc_ubi32_clk = {
+       .halt_reg = 0x1D03C,
+       .clkr = {
+               .enable_reg = 0x1D03C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_mem_noc_ubi32_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &ubi32_mem_noc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port1_rx_clk = {
+       .halt_reg = 0x68240,
+       .clkr = {
+               .enable_reg = 0x68240,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port1_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port1_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port1_tx_clk = {
+       .halt_reg = 0x68244,
+       .clkr = {
+               .enable_reg = 0x68244,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port1_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port1_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port2_rx_clk = {
+       .halt_reg = 0x68248,
+       .clkr = {
+               .enable_reg = 0x68248,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port2_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port2_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port2_tx_clk = {
+       .halt_reg = 0x6824c,
+       .clkr = {
+               .enable_reg = 0x6824c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port2_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port2_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port3_rx_clk = {
+       .halt_reg = 0x68250,
+       .clkr = {
+               .enable_reg = 0x68250,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port3_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port3_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port3_tx_clk = {
+       .halt_reg = 0x68254,
+       .clkr = {
+               .enable_reg = 0x68254,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port3_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port3_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port4_rx_clk = {
+       .halt_reg = 0x68258,
+       .clkr = {
+               .enable_reg = 0x68258,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port4_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port4_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port4_tx_clk = {
+       .halt_reg = 0x6825c,
+       .clkr = {
+               .enable_reg = 0x6825c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port4_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port4_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port5_rx_clk = {
+       .halt_reg = 0x68260,
+       .clkr = {
+               .enable_reg = 0x68260,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port5_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_port5_tx_clk = {
+       .halt_reg = 0x68264,
+       .clkr = {
+               .enable_reg = 0x68264,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_port5_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_ppe_cfg_clk = {
+       .halt_reg = 0x68194,
+       .clkr = {
+               .enable_reg = 0x68194,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_ppe_cfg_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_ppe_clk = {
+       .halt_reg = 0x68190,
+       .clkr = {
+               .enable_reg = 0x68190,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_ppe_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_ppe_ipe_clk = {
+       .halt_reg = 0x68338,
+       .clkr = {
+               .enable_reg = 0x68338,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_ppe_ipe_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nss_ptp_ref_clk = {
+       .halt_reg = 0x6816C,
+       .clkr = {
+               .enable_reg = 0x6816C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nss_ptp_ref_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_cdiv_clk_src.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_ce_apb_clk = {
+       .halt_reg = 0x6830C,
+       .clkr = {
+               .enable_reg = 0x6830C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_ce_apb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ce_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_ce_axi_clk = {
+       .halt_reg = 0x68308,
+       .clkr = {
+               .enable_reg = 0x68308,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_ce_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ce_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_crypto_clk = {
+       .halt_reg = 0x68314,
+       .clkr = {
+               .enable_reg = 0x68314,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_crypto_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_crypto_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_ppe_cfg_clk = {
+       .halt_reg = 0x68304,
+       .clkr = {
+               .enable_reg = 0x68304,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_ppe_cfg_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_ppe_clk = {
+       .halt_reg = 0x68300,
+       .clkr = {
+               .enable_reg = 0x68300,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_ppe_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
+       .halt_reg = 0x68180,
+       .clkr = {
+               .enable_reg = 0x68180,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_qosgen_ref_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_xo_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_clk = {
+       .halt_reg = 0x68188,
+       .clkr = {
+               .enable_reg = 0x68188,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_snoc_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &system_noc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
+       .halt_reg = 0x68184,
+       .clkr = {
+               .enable_reg = 0x68184,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_timeout_ref_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_xo_div4_clk_src.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_nssnoc_ubi0_ahb_clk = {
+       .halt_reg = 0x68270,
+       .clkr = {
+               .enable_reg = 0x68270,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_nssnoc_ubi0_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ce_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_port1_mac_clk = {
+       .halt_reg = 0x68320,
+       .clkr = {
+               .enable_reg = 0x68320,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_port1_mac_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_port2_mac_clk = {
+       .halt_reg = 0x68324,
+       .clkr = {
+               .enable_reg = 0x68324,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_port2_mac_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_port3_mac_clk = {
+       .halt_reg = 0x68328,
+       .clkr = {
+               .enable_reg = 0x68328,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_port3_mac_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_port4_mac_clk = {
+       .halt_reg = 0x6832c,
+       .clkr = {
+               .enable_reg = 0x6832c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_port4_mac_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_port5_mac_clk = {
+       .halt_reg = 0x68330,
+       .clkr = {
+               .enable_reg = 0x68330,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_port5_mac_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ppe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ubi0_ahb_clk = {
+       .halt_reg = 0x6820C,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x6820C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ubi0_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ce_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ubi0_axi_clk = {
+       .halt_reg = 0x68200,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x68200,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ubi0_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &ubi32_mem_noc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ubi0_nc_axi_clk = {
+       .halt_reg = 0x68204,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x68204,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ubi0_nc_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ubi0_core_clk = {
+       .halt_reg = 0x68210,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x68210,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ubi0_core_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_ubi0_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie0_ahb_clk = {
+       .halt_reg = 0x75010,
+       .clkr = {
+               .enable_reg = 0x75010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie0_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie0_aux_clk = {
+       .halt_reg = 0x75014,
+       .clkr = {
+               .enable_reg = 0x75014,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie0_aux_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcie0_aux_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie0_axi_m_clk = {
+       .halt_reg = 0x75008,
+       .clkr = {
+               .enable_reg = 0x75008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie0_axi_m_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcie0_axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_clk = {
+       .halt_reg = 0x7500c,
+       .clkr = {
+               .enable_reg = 0x7500c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie0_axi_s_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcie0_axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sys_noc_pcie0_axi_clk = {
+       .halt_reg = 0x26048,
+       .clkr = {
+               .enable_reg = 0x26048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sys_noc_pcie0_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcie0_axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie0_pipe_clk = {
+       .halt_reg = 0x75018,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x75018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie0_pipe_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcie0_pipe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+       .halt_reg = 0x13004,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x0b004,
+               .enable_mask = BIT(8),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_prng_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+       .halt_reg = 0x29084,
+       .clkr = {
+               .enable_reg = 0x29084,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_qdss_dap_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &qdss_dap_sync_clk_src.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+       .halt_reg = 0x57024,
+       .clkr = {
+               .enable_reg = 0x57024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_qpic_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+       .halt_reg = 0x57020,
+       .clkr = {
+               .enable_reg = 0x57020,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_qpic_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+       .halt_reg = 0x4201c,
+       .clkr = {
+               .enable_reg = 0x4201c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc1_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+       .halt_reg = 0x42018,
+       .clkr = {
+               .enable_reg = 0x42018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc1_apps_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &sdcc1_apps_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_ahb_clk = {
+       .halt_reg = 0x56008,
+       .clkr = {
+               .enable_reg = 0x56008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port1_rx_clk = {
+       .halt_reg = 0x56010,
+       .clkr = {
+               .enable_reg = 0x56010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port1_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port1_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port1_tx_clk = {
+       .halt_reg = 0x56014,
+       .clkr = {
+               .enable_reg = 0x56014,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port1_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port1_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port2_rx_clk = {
+       .halt_reg = 0x56018,
+       .clkr = {
+               .enable_reg = 0x56018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port2_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port2_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port2_tx_clk = {
+       .halt_reg = 0x5601c,
+       .clkr = {
+               .enable_reg = 0x5601c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port2_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port2_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port3_rx_clk = {
+       .halt_reg = 0x56020,
+       .clkr = {
+               .enable_reg = 0x56020,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port3_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port3_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port3_tx_clk = {
+       .halt_reg = 0x56024,
+       .clkr = {
+               .enable_reg = 0x56024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port3_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port3_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port4_rx_clk = {
+       .halt_reg = 0x56028,
+       .clkr = {
+               .enable_reg = 0x56028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port4_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port4_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port4_tx_clk = {
+       .halt_reg = 0x5602c,
+       .clkr = {
+               .enable_reg = 0x5602c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port4_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port4_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port5_rx_clk = {
+       .halt_reg = 0x56030,
+       .clkr = {
+               .enable_reg = 0x56030,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port5_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_port5_tx_clk = {
+       .halt_reg = 0x56034,
+       .clkr = {
+               .enable_reg = 0x56034,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_port5_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy0_sys_clk = {
+       .halt_reg = 0x5600C,
+       .clkr = {
+               .enable_reg = 0x5600C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy0_sys_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_xo_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy1_ahb_clk = {
+       .halt_reg = 0x56108,
+       .clkr = {
+               .enable_reg = 0x56108,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy1_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy1_port5_rx_clk = {
+       .halt_reg = 0x56110,
+       .clkr = {
+               .enable_reg = 0x56110,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy1_port5_rx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_rx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy1_port5_tx_clk = {
+       .halt_reg = 0x56114,
+       .clkr = {
+               .enable_reg = 0x56114,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy1_port5_tx_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &nss_port5_tx_div_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_uniphy1_sys_clk = {
+       .halt_reg = 0x5610C,
+       .clkr = {
+               .enable_reg = 0x5610C,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_uniphy1_sys_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_xo_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb0_aux_clk = {
+       .halt_reg = 0x3e044,
+       .clkr = {
+               .enable_reg = 0x3e044,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb0_aux_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &usb0_aux_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb0_master_clk = {
+       .halt_reg = 0x3e000,
+       .clkr = {
+               .enable_reg = 0x3e000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb0_master_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &usb0_master_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
+       .halt_reg = 0x47014,
+       .clkr = {
+               .enable_reg = 0x47014,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_snoc_bus_timeout2_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &usb0_master_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
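+/* The RCG below shares offset 0x75070 with the gcc_pcie0_rchng_clk branch
+ * that follows it; the branch enable bit is BIT(1).
+ */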
+static struct clk_rcg2 pcie0_rchng_clk_src = {
+       .cmd_rcgr = 0x75070,
+       .freq_tbl = ftbl_pcie_rchng_clk_src,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcie0_rchng_clk_src",
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_pcie0_rchng_clk = {
+       .halt_reg = 0x75070,
+       .clkr = {
+               .enable_reg = 0x75070,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie0_rchng_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcie0_rchng_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
+       .halt_reg = 0x75048,
+       .clkr = {
+               .enable_reg = 0x75048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie0_axi_s_bridge_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcie0_axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sys_noc_usb0_axi_clk = {
+       .halt_reg = 0x26040,
+       .clkr = {
+               .enable_reg = 0x26040,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sys_noc_usb0_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &usb0_master_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb0_mock_utmi_clk = {
+       .halt_reg = 0x3e008,
+       .clkr = {
+               .enable_reg = 0x3e008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb0_mock_utmi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &usb0_mock_utmi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
+       .halt_reg = 0x3e080,
+       .clkr = {
+               .enable_reg = 0x3e080,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb0_phy_cfg_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb0_pipe_clk = {
+       .halt_reg = 0x3e040,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x3e040,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb0_pipe_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &usb0_pipe_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb0_sleep_clk = {
+       .halt_reg = 0x3e004,
+       .clkr = {
+               .enable_reg = 0x3e004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb0_sleep_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_sleep_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb1_master_clk = {
+       .halt_reg = 0x3f000,
+       .clkr = {
+               .enable_reg = 0x3f000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb1_master_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb1_mock_utmi_clk = {
+       .halt_reg = 0x3f008,
+       .clkr = {
+               .enable_reg = 0x3f008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb1_mock_utmi_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &usb1_mock_utmi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb1_phy_cfg_ahb_clk = {
+       .halt_reg = 0x3f080,
+       .clkr = {
+               .enable_reg = 0x3f080,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb1_phy_cfg_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb1_sleep_clk = {
+       .halt_reg = 0x3f004,
+       .clkr = {
+               .enable_reg = 0x3f004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb1_sleep_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_sleep_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
+       .halt_reg = 0x56308,
+       .clkr = {
+               .enable_reg = 0x56308,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_cmn_12gpll_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_cmn_12gpll_sys_clk = {
+       .halt_reg = 0x5630c,
+       .clkr = {
+               .enable_reg = 0x5630c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_cmn_12gpll_sys_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &gcc_xo_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+       .halt_reg = 0x5d014,
+       .clkr = {
+               .enable_reg = 0x5d014,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc1_ice_core_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &sdcc1_ice_core_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_dcc_clk = {
+       .halt_reg = 0x77004,
+       .clkr = {
+               .enable_reg = 0x77004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_dcc_clk",
+                       .parent_hws = (const struct clk_hw *[]){
+                                       &pcnoc_bfdcd_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
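+/* Alpha PLL configuration values for the UBI32 and NSS crypto PLLs,
+ * programmed into the PLL registers at probe time.
+ */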
+static const struct alpha_pll_config ubi32_pll_config = {
+       .l = 0x3e,
+       .alpha = 0x57,
+       .config_ctl_val = 0x240d6aa8,
+       .config_ctl_hi_val = 0x3c2,
+       .main_output_mask = BIT(0),
+       .aux_output_mask = BIT(1),
+       .pre_div_val = 0x0,
+       .pre_div_mask = BIT(12),
+       .post_div_val = 0x0,
+       .post_div_mask = GENMASK(9, 8),
+};
+
+static const struct alpha_pll_config nss_crypto_pll_config = {
+       .l = 0x32,
+       .alpha = 0x0,
+       .alpha_hi = 0x0,
+       .config_ctl_val = 0x4001055b,
+       .main_output_mask = BIT(0),
+       .pre_div_val = 0x0,
+       .pre_div_mask = GENMASK(14, 12),
+       .post_div_val = 0x1 << 8,
+       .post_div_mask = GENMASK(11, 8),
+       .vco_mask = GENMASK(21, 20),
+       .vco_val = 0x0,
+       .alpha_en_mask = BIT(24),
+};
+
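+/* Clocks registered as plain clk_hw entries (fixed-factor dividers, no regmap access). */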
+static struct clk_hw *gcc_ipq6018_hws[] = {
+       &gpll0_out_main_div2.hw,
+       &gcc_xo_div4_clk_src.hw,
+       &nss_ppe_cdiv_clk_src.hw,
+       &gpll6_out_main_div2.hw,
+       &qdss_dap_sync_clk_src.hw,
+       &qdss_tsctr_div2_clk_src.hw,
+};
+
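+/* regmap-backed clocks, indexed by the IDs exposed in the dt-bindings clock header. */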
+static struct clk_regmap *gcc_ipq6018_clks[] = {
+       [GPLL0_MAIN] = &gpll0_main.clkr,
+       [GPLL0] = &gpll0.clkr,
+       [UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
+       [UBI32_PLL] = &ubi32_pll.clkr,
+       [GPLL6_MAIN] = &gpll6_main.clkr,
+       [GPLL6] = &gpll6.clkr,
+       [GPLL4_MAIN] = &gpll4_main.clkr,
+       [GPLL4] = &gpll4.clkr,
+       [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+       [GPLL2_MAIN] = &gpll2_main.clkr,
+       [GPLL2] = &gpll2.clkr,
+       [NSS_CRYPTO_PLL_MAIN] = &nss_crypto_pll_main.clkr,
+       [NSS_CRYPTO_PLL] = &nss_crypto_pll.clkr,
+       [QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
+       [QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
+       [NSS_PPE_CLK_SRC] = &nss_ppe_clk_src.clkr,
+       [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+       [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+       [SNOC_NSSNOC_BFDCD_CLK_SRC] = &snoc_nssnoc_bfdcd_clk_src.clkr,
+       [NSS_CE_CLK_SRC] = &nss_ce_clk_src.clkr,
+       [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+       [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+       [NSS_PORT5_RX_CLK_SRC] = &nss_port5_rx_clk_src.clkr,
+       [NSS_PORT5_TX_CLK_SRC] = &nss_port5_tx_clk_src.clkr,
+       [UBI32_MEM_NOC_BFDCD_CLK_SRC] = &ubi32_mem_noc_bfdcd_clk_src.clkr,
+       [PCIE0_AXI_CLK_SRC] = &pcie0_axi_clk_src.clkr,
+       [USB0_MASTER_CLK_SRC] = &usb0_master_clk_src.clkr,
+       [APSS_AHB_POSTDIV_CLK_SRC] = &apss_ahb_postdiv_clk_src.clkr,
+       [NSS_PORT1_RX_CLK_SRC] = &nss_port1_rx_clk_src.clkr,
+       [NSS_PORT1_TX_CLK_SRC] = &nss_port1_tx_clk_src.clkr,
+       [NSS_PORT2_RX_CLK_SRC] = &nss_port2_rx_clk_src.clkr,
+       [NSS_PORT2_TX_CLK_SRC] = &nss_port2_tx_clk_src.clkr,
+       [NSS_PORT3_RX_CLK_SRC] = &nss_port3_rx_clk_src.clkr,
+       [NSS_PORT3_TX_CLK_SRC] = &nss_port3_tx_clk_src.clkr,
+       [NSS_PORT4_RX_CLK_SRC] = &nss_port4_rx_clk_src.clkr,
+       [NSS_PORT4_TX_CLK_SRC] = &nss_port4_tx_clk_src.clkr,
+       [NSS_PORT5_RX_DIV_CLK_SRC] = &nss_port5_rx_div_clk_src.clkr,
+       [NSS_PORT5_TX_DIV_CLK_SRC] = &nss_port5_tx_div_clk_src.clkr,
+       [APSS_AXI_CLK_SRC] = &apss_axi_clk_src.clkr,
+       [NSS_CRYPTO_CLK_SRC] = &nss_crypto_clk_src.clkr,
+       [NSS_PORT1_RX_DIV_CLK_SRC] = &nss_port1_rx_div_clk_src.clkr,
+       [NSS_PORT1_TX_DIV_CLK_SRC] = &nss_port1_tx_div_clk_src.clkr,
+       [NSS_PORT2_RX_DIV_CLK_SRC] = &nss_port2_rx_div_clk_src.clkr,
+       [NSS_PORT2_TX_DIV_CLK_SRC] = &nss_port2_tx_div_clk_src.clkr,
+       [NSS_PORT3_RX_DIV_CLK_SRC] = &nss_port3_rx_div_clk_src.clkr,
+       [NSS_PORT3_TX_DIV_CLK_SRC] = &nss_port3_tx_div_clk_src.clkr,
+       [NSS_PORT4_RX_DIV_CLK_SRC] = &nss_port4_rx_div_clk_src.clkr,
+       [NSS_PORT4_TX_DIV_CLK_SRC] = &nss_port4_tx_div_clk_src.clkr,
+       [NSS_UBI0_CLK_SRC] = &nss_ubi0_clk_src.clkr,
+       [ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr,
+       [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+       [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+       [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+       [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+       [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+       [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+       [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+       [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+       [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+       [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+       [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+       [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+       [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+       [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+       [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+       [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+       [NSS_UBI0_DIV_CLK_SRC] = &nss_ubi0_div_clk_src.clkr,
+       [PCIE0_AUX_CLK_SRC] = &pcie0_aux_clk_src.clkr,
+       [PCIE0_PIPE_CLK_SRC] = &pcie0_pipe_clk_src.clkr,
+       [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+       [USB0_AUX_CLK_SRC] = &usb0_aux_clk_src.clkr,
+       [USB0_MOCK_UTMI_CLK_SRC] = &usb0_mock_utmi_clk_src.clkr,
+       [USB0_PIPE_CLK_SRC] = &usb0_pipe_clk_src.clkr,
+       [USB1_MOCK_UTMI_CLK_SRC] = &usb1_mock_utmi_clk_src.clkr,
+       [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
+       [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+       [GCC_APSS_AXI_CLK] = &gcc_apss_axi_clk.clkr,
+       [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+       [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+       [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+       [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+       [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+       [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+       [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+       [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+       [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+       [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+       [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+       [GCC_XO_CLK] = &gcc_xo_clk.clkr,
+       [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+       [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+       [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+       [GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr,
+       [GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
+       [GCC_NSS_CE_APB_CLK] = &gcc_nss_ce_apb_clk.clkr,
+       [GCC_NSS_CE_AXI_CLK] = &gcc_nss_ce_axi_clk.clkr,
+       [GCC_NSS_CFG_CLK] = &gcc_nss_cfg_clk.clkr,
+       [GCC_NSS_CRYPTO_CLK] = &gcc_nss_crypto_clk.clkr,
+       [GCC_NSS_CSR_CLK] = &gcc_nss_csr_clk.clkr,
+       [GCC_NSS_EDMA_CFG_CLK] = &gcc_nss_edma_cfg_clk.clkr,
+       [GCC_NSS_EDMA_CLK] = &gcc_nss_edma_clk.clkr,
+       [GCC_NSS_NOC_CLK] = &gcc_nss_noc_clk.clkr,
+       [GCC_UBI0_UTCM_CLK] = &gcc_ubi0_utcm_clk.clkr,
+       [GCC_SNOC_NSSNOC_CLK] = &gcc_snoc_nssnoc_clk.clkr,
+       [GCC_NSS_PORT1_RX_CLK] = &gcc_nss_port1_rx_clk.clkr,
+       [GCC_NSS_PORT1_TX_CLK] = &gcc_nss_port1_tx_clk.clkr,
+       [GCC_NSS_PORT2_RX_CLK] = &gcc_nss_port2_rx_clk.clkr,
+       [GCC_NSS_PORT2_TX_CLK] = &gcc_nss_port2_tx_clk.clkr,
+       [GCC_NSS_PORT3_RX_CLK] = &gcc_nss_port3_rx_clk.clkr,
+       [GCC_NSS_PORT3_TX_CLK] = &gcc_nss_port3_tx_clk.clkr,
+       [GCC_NSS_PORT4_RX_CLK] = &gcc_nss_port4_rx_clk.clkr,
+       [GCC_NSS_PORT4_TX_CLK] = &gcc_nss_port4_tx_clk.clkr,
+       [GCC_NSS_PORT5_RX_CLK] = &gcc_nss_port5_rx_clk.clkr,
+       [GCC_NSS_PORT5_TX_CLK] = &gcc_nss_port5_tx_clk.clkr,
+       [GCC_NSS_PPE_CFG_CLK] = &gcc_nss_ppe_cfg_clk.clkr,
+       [GCC_NSS_PPE_CLK] = &gcc_nss_ppe_clk.clkr,
+       [GCC_NSS_PPE_IPE_CLK] = &gcc_nss_ppe_ipe_clk.clkr,
+       [GCC_NSS_PTP_REF_CLK] = &gcc_nss_ptp_ref_clk.clkr,
+       [GCC_NSSNOC_CE_APB_CLK] = &gcc_nssnoc_ce_apb_clk.clkr,
+       [GCC_NSSNOC_CE_AXI_CLK] = &gcc_nssnoc_ce_axi_clk.clkr,
+       [GCC_NSSNOC_CRYPTO_CLK] = &gcc_nssnoc_crypto_clk.clkr,
+       [GCC_NSSNOC_PPE_CFG_CLK] = &gcc_nssnoc_ppe_cfg_clk.clkr,
+       [GCC_NSSNOC_PPE_CLK] = &gcc_nssnoc_ppe_clk.clkr,
+       [GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr,
+       [GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr,
+       [GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr,
+       [GCC_NSSNOC_UBI0_AHB_CLK] = &gcc_nssnoc_ubi0_ahb_clk.clkr,
+       [GCC_PORT1_MAC_CLK] = &gcc_port1_mac_clk.clkr,
+       [GCC_PORT2_MAC_CLK] = &gcc_port2_mac_clk.clkr,
+       [GCC_PORT3_MAC_CLK] = &gcc_port3_mac_clk.clkr,
+       [GCC_PORT4_MAC_CLK] = &gcc_port4_mac_clk.clkr,
+       [GCC_PORT5_MAC_CLK] = &gcc_port5_mac_clk.clkr,
+       [GCC_UBI0_AHB_CLK] = &gcc_ubi0_ahb_clk.clkr,
+       [GCC_UBI0_AXI_CLK] = &gcc_ubi0_axi_clk.clkr,
+       [GCC_UBI0_NC_AXI_CLK] = &gcc_ubi0_nc_axi_clk.clkr,
+       [GCC_UBI0_CORE_CLK] = &gcc_ubi0_core_clk.clkr,
+       [GCC_PCIE0_AHB_CLK] = &gcc_pcie0_ahb_clk.clkr,
+       [GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr,
+       [GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr,
+       [GCC_PCIE0_AXI_S_CLK] = &gcc_pcie0_axi_s_clk.clkr,
+       [GCC_SYS_NOC_PCIE0_AXI_CLK] = &gcc_sys_noc_pcie0_axi_clk.clkr,
+       [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
+       [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+       [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+       [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+       [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+       [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+       [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+       [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
+       [GCC_UNIPHY0_PORT1_RX_CLK] = &gcc_uniphy0_port1_rx_clk.clkr,
+       [GCC_UNIPHY0_PORT1_TX_CLK] = &gcc_uniphy0_port1_tx_clk.clkr,
+       [GCC_UNIPHY0_PORT2_RX_CLK] = &gcc_uniphy0_port2_rx_clk.clkr,
+       [GCC_UNIPHY0_PORT2_TX_CLK] = &gcc_uniphy0_port2_tx_clk.clkr,
+       [GCC_UNIPHY0_PORT3_RX_CLK] = &gcc_uniphy0_port3_rx_clk.clkr,
+       [GCC_UNIPHY0_PORT3_TX_CLK] = &gcc_uniphy0_port3_tx_clk.clkr,
+       [GCC_UNIPHY0_PORT4_RX_CLK] = &gcc_uniphy0_port4_rx_clk.clkr,
+       [GCC_UNIPHY0_PORT4_TX_CLK] = &gcc_uniphy0_port4_tx_clk.clkr,
+       [GCC_UNIPHY0_PORT5_RX_CLK] = &gcc_uniphy0_port5_rx_clk.clkr,
+       [GCC_UNIPHY0_PORT5_TX_CLK] = &gcc_uniphy0_port5_tx_clk.clkr,
+       [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
+       [GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr,
+       [GCC_UNIPHY1_PORT5_RX_CLK] = &gcc_uniphy1_port5_rx_clk.clkr,
+       [GCC_UNIPHY1_PORT5_TX_CLK] = &gcc_uniphy1_port5_tx_clk.clkr,
+       [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
+       [GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr,
+       [GCC_SYS_NOC_USB0_AXI_CLK] = &gcc_sys_noc_usb0_axi_clk.clkr,
+       [GCC_SNOC_BUS_TIMEOUT2_AHB_CLK] = &gcc_snoc_bus_timeout2_ahb_clk.clkr,
+       [GCC_USB0_MASTER_CLK] = &gcc_usb0_master_clk.clkr,
+       [GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
+       [GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+       [GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
+       [GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
+       [GCC_USB1_MASTER_CLK] = &gcc_usb1_master_clk.clkr,
+       [GCC_USB1_MOCK_UTMI_CLK] = &gcc_usb1_mock_utmi_clk.clkr,
+       [GCC_USB1_PHY_CFG_AHB_CLK] = &gcc_usb1_phy_cfg_ahb_clk.clkr,
+       [GCC_USB1_SLEEP_CLK] = &gcc_usb1_sleep_clk.clkr,
+       [GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr,
+       [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
+       [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+       [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+       [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+       [PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr,
+       [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+       [PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
+       [WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr,
+       [Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr,
+       [RBCPR_WCSS_CLK_SRC] = &rbcpr_wcss_clk_src.clkr,
+       [GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr,
+       [LPASS_CORE_AXIM_CLK_SRC] = &lpass_core_axim_clk_src.clkr,
+       [GCC_LPASS_SNOC_CFG_CLK] = &gcc_lpass_snoc_cfg_clk.clkr,
+       [LPASS_SNOC_CFG_CLK_SRC] = &lpass_snoc_cfg_clk_src.clkr,
+       [GCC_LPASS_Q6_AXIM_CLK] = &gcc_lpass_q6_axim_clk.clkr,
+       [LPASS_Q6_AXIM_CLK_SRC] = &lpass_q6_axim_clk_src.clkr,
+       [GCC_LPASS_Q6_ATBM_AT_CLK] = &gcc_lpass_q6_atbm_at_clk.clkr,
+       [GCC_LPASS_Q6_PCLKDBG_CLK] = &gcc_lpass_q6_pclkdbg_clk.clkr,
+       [GCC_LPASS_Q6SS_TSCTR_1TO2_CLK] = &gcc_lpass_q6ss_tsctr_1to2_clk.clkr,
+       [GCC_LPASS_Q6SS_TRIG_CLK] = &gcc_lpass_q6ss_trig_clk.clkr,
+       [GCC_LPASS_TBU_CLK] = &gcc_lpass_tbu_clk.clkr,
+       [GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr,
+       [GCC_MEM_NOC_UBI32_CLK] = &gcc_mem_noc_ubi32_clk.clkr,
+       [GCC_MEM_NOC_LPASS_CLK] = &gcc_mem_noc_lpass_clk.clkr,
+       [GCC_SNOC_LPASS_CFG_CLK] = &gcc_snoc_lpass_cfg_clk.clkr,
+       [QDSS_STM_CLK_SRC] = &qdss_stm_clk_src.clkr,
+       [QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr,
+};
+
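+/* Reset map entries are { register offset, bit } pairs handled by the common qcom reset controller. */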
+static const struct qcom_reset_map gcc_ipq6018_resets[] = {
+       [GCC_BLSP1_BCR] = { 0x01000, 0 },
+       [GCC_BLSP1_QUP1_BCR] = { 0x02000, 0 },
+       [GCC_BLSP1_UART1_BCR] = { 0x02038, 0 },
+       [GCC_BLSP1_QUP2_BCR] = { 0x03008, 0 },
+       [GCC_BLSP1_UART2_BCR] = { 0x03028, 0 },
+       [GCC_BLSP1_QUP3_BCR] = { 0x04008, 0 },
+       [GCC_BLSP1_UART3_BCR] = { 0x04028, 0 },
+       [GCC_BLSP1_QUP4_BCR] = { 0x05008, 0 },
+       [GCC_BLSP1_UART4_BCR] = { 0x05028, 0 },
+       [GCC_BLSP1_QUP5_BCR] = { 0x06008, 0 },
+       [GCC_BLSP1_UART5_BCR] = { 0x06028, 0 },
+       [GCC_BLSP1_QUP6_BCR] = { 0x07008, 0 },
+       [GCC_BLSP1_UART6_BCR] = { 0x07028, 0 },
+       [GCC_IMEM_BCR] = { 0x0e000, 0 },
+       [GCC_SMMU_BCR] = { 0x12000, 0 },
+       [GCC_APSS_TCU_BCR] = { 0x12050, 0 },
+       [GCC_SMMU_XPU_BCR] = { 0x12054, 0 },
+       [GCC_PCNOC_TBU_BCR] = { 0x12058, 0 },
+       [GCC_SMMU_CFG_BCR] = { 0x1208c, 0 },
+       [GCC_PRNG_BCR] = { 0x13000, 0 },
+       [GCC_BOOT_ROM_BCR] = { 0x13008, 0 },
+       [GCC_CRYPTO_BCR] = { 0x16000, 0 },
+       [GCC_WCSS_BCR] = { 0x18000, 0 },
+       [GCC_WCSS_Q6_BCR] = { 0x18100, 0 },
+       [GCC_NSS_BCR] = { 0x19000, 0 },
+       [GCC_SEC_CTRL_BCR] = { 0x1a000, 0 },
+       [GCC_ADSS_BCR] = { 0x1c000, 0 },
+       [GCC_DDRSS_BCR] = { 0x1e000, 0 },
+       [GCC_SYSTEM_NOC_BCR] = { 0x26000, 0 },
+       [GCC_PCNOC_BCR] = { 0x27018, 0 },
+       [GCC_TCSR_BCR] = { 0x28000, 0 },
+       [GCC_QDSS_BCR] = { 0x29000, 0 },
+       [GCC_DCD_BCR] = { 0x2a000, 0 },
+       [GCC_MSG_RAM_BCR] = { 0x2b000, 0 },
+       [GCC_MPM_BCR] = { 0x2c000, 0 },
+       [GCC_SPDM_BCR] = { 0x2f000, 0 },
+       [GCC_RBCPR_BCR] = { 0x33000, 0 },
+       [GCC_RBCPR_MX_BCR] = { 0x33014, 0 },
+       [GCC_TLMM_BCR] = { 0x34000, 0 },
+       [GCC_RBCPR_WCSS_BCR] = { 0x3a000, 0 },
+       [GCC_USB0_PHY_BCR] = { 0x3e034, 0 },
+       [GCC_USB3PHY_0_PHY_BCR] = { 0x3e03c, 0 },
+       [GCC_USB0_BCR] = { 0x3e070, 0 },
+       [GCC_USB1_BCR] = { 0x3f070, 0 },
+       [GCC_QUSB2_0_PHY_BCR] = { 0x4103c, 0 },
+       [GCC_QUSB2_1_PHY_BCR] = { 0x41040, 0 },
+       [GCC_SDCC1_BCR] = { 0x42000, 0 },
+       [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000, 0 },
+       [GCC_SNOC_BUS_TIMEOUT1_BCR] = { 0x47008, 0 },
+       [GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x47010, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040, 0 },
+       [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048, 0 },
+       [GCC_UNIPHY0_BCR] = { 0x56000, 0 },
+       [GCC_UNIPHY1_BCR] = { 0x56100, 0 },
+       [GCC_CMN_12GPLL_BCR] = { 0x56300, 0 },
+       [GCC_QPIC_BCR] = { 0x57018, 0 },
+       [GCC_MDIO_BCR] = { 0x58000, 0 },
+       [GCC_WCSS_CORE_TBU_BCR] = { 0x66000, 0 },
+       [GCC_WCSS_Q6_TBU_BCR] = { 0x67000, 0 },
+       [GCC_USB0_TBU_BCR] = { 0x6a000, 0 },
+       [GCC_PCIE0_TBU_BCR] = { 0x6b000, 0 },
+       [GCC_NSS_NOC_TBU_BCR] = { 0x6e000, 0 },
+       [GCC_PCIE0_BCR] = { 0x75004, 0 },
+       [GCC_PCIE0_PHY_BCR] = { 0x75038, 0 },
+       [GCC_PCIE0PHY_PHY_BCR] = { 0x7503c, 0 },
+       [GCC_PCIE0_LINK_DOWN_BCR] = { 0x75044, 0 },
+       [GCC_DCC_BCR] = { 0x77000, 0 },
+       [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x78000, 0 },
+       [GCC_SMMU_CATS_BCR] = { 0x7c000, 0 },
+       [GCC_UBI0_AXI_ARES] = { 0x68010, 0 },
+       [GCC_UBI0_AHB_ARES] = { 0x68010, 1 },
+       [GCC_UBI0_NC_AXI_ARES] = { 0x68010, 2 },
+       [GCC_UBI0_DBG_ARES] = { 0x68010, 3 },
+       [GCC_UBI0_CORE_CLAMP_ENABLE] = { 0x68010, 4 },
+       [GCC_UBI0_CLKRST_CLAMP_ENABLE] = { 0x68010, 5 },
+       [GCC_UBI0_UTCM_ARES] = { 0x68010, 6 },
+       [GCC_UBI0_CORE_ARES] = { 0x68010, 7 },
+       [GCC_NSS_CFG_ARES] = { 0x68010, 16 },
+       [GCC_NSS_NOC_ARES] = { 0x68010, 18 },
+       [GCC_NSS_CRYPTO_ARES] = { 0x68010, 19 },
+       [GCC_NSS_CSR_ARES] = { 0x68010, 20 },
+       [GCC_NSS_CE_APB_ARES] = { 0x68010, 21 },
+       [GCC_NSS_CE_AXI_ARES] = { 0x68010, 22 },
+       [GCC_NSSNOC_CE_APB_ARES] = { 0x68010, 23 },
+       [GCC_NSSNOC_CE_AXI_ARES] = { 0x68010, 24 },
+       [GCC_NSSNOC_UBI0_AHB_ARES] = { 0x68010, 25 },
+       [GCC_NSSNOC_SNOC_ARES] = { 0x68010, 27 },
+       [GCC_NSSNOC_CRYPTO_ARES] = { 0x68010, 28 },
+       [GCC_NSSNOC_ATB_ARES] = { 0x68010, 29 },
+       [GCC_NSSNOC_QOSGEN_REF_ARES] = { 0x68010, 30 },
+       [GCC_NSSNOC_TIMEOUT_REF_ARES] = { 0x68010, 31 },
+       [GCC_PCIE0_PIPE_ARES] = { 0x75040, 0 },
+       [GCC_PCIE0_SLEEP_ARES] = { 0x75040, 1 },
+       [GCC_PCIE0_CORE_STICKY_ARES] = { 0x75040, 2 },
+       [GCC_PCIE0_AXI_MASTER_ARES] = { 0x75040, 3 },
+       [GCC_PCIE0_AXI_SLAVE_ARES] = { 0x75040, 4 },
+       [GCC_PCIE0_AHB_ARES] = { 0x75040, 5 },
+       [GCC_PCIE0_AXI_MASTER_STICKY_ARES] = { 0x75040, 6 },
+       [GCC_PCIE0_AXI_SLAVE_STICKY_ARES] = { 0x75040, 7 },
+       [GCC_PPE_FULL_RESET] = { 0x68014, 0 },
+       [GCC_UNIPHY0_SOFT_RESET] = { 0x56004, 0 },
+       [GCC_UNIPHY0_XPCS_RESET] = { 0x56004, 2 },
+       [GCC_UNIPHY1_SOFT_RESET] = { 0x56104, 0 },
+       [GCC_UNIPHY1_XPCS_RESET] = { 0x56104, 2 },
+       [GCC_EDMA_HW_RESET] = { 0x68014, 0 },
+       [GCC_NSSPORT1_RESET] = { 0x68014, 0 },
+       [GCC_NSSPORT2_RESET] = { 0x68014, 0 },
+       [GCC_NSSPORT3_RESET] = { 0x68014, 0 },
+       [GCC_NSSPORT4_RESET] = { 0x68014, 0 },
+       [GCC_NSSPORT5_RESET] = { 0x68014, 0 },
+       [GCC_UNIPHY0_PORT1_ARES] = { 0x56004, 0 },
+       [GCC_UNIPHY0_PORT2_ARES] = { 0x56004, 0 },
+       [GCC_UNIPHY0_PORT3_ARES] = { 0x56004, 0 },
+       [GCC_UNIPHY0_PORT4_ARES] = { 0x56004, 0 },
+       [GCC_UNIPHY0_PORT5_ARES] = { 0x56004, 0 },
+       [GCC_UNIPHY0_PORT_4_5_RESET] = { 0x56004, 0 },
+       [GCC_UNIPHY0_PORT_4_RESET] = { 0x56004, 0 },
+       [GCC_LPASS_BCR] = { 0x1f000, 0 },
+       [GCC_UBI32_TBU_BCR] = { 0x65000, 0 },
+       [GCC_LPASS_TBU_BCR] = { 0x6c000, 0 },
+       [GCC_WCSSAON_RESET] = { 0x59010, 0 },
+       [GCC_LPASS_Q6_AXIM_ARES] = { 0x1f004, 0 },
+       [GCC_LPASS_Q6SS_TSCTR_1TO2_ARES] = { 0x1f004, 1 },
+       [GCC_LPASS_Q6SS_TRIG_ARES] = { 0x1f004, 2 },
+       [GCC_LPASS_Q6_ATBM_AT_ARES] = { 0x1f004, 3 },
+       [GCC_LPASS_Q6_PCLKDBG_ARES] = { 0x1f004, 4 },
+       [GCC_LPASS_CORE_AXIM_ARES] = { 0x1f004, 5 },
+       [GCC_LPASS_SNOC_CFG_ARES] = { 0x1f004, 6 },
+       [GCC_WCSS_DBG_ARES] = { 0x59008, 0 },
+       [GCC_WCSS_ECAHB_ARES] = { 0x59008, 1 },
+       [GCC_WCSS_ACMT_ARES] = { 0x59008, 2 },
+       [GCC_WCSS_DBG_BDG_ARES] = { 0x59008, 3 },
+       [GCC_WCSS_AHB_S_ARES] = { 0x59008, 4 },
+       [GCC_WCSS_AXI_M_ARES] = { 0x59008, 5 },
+       [GCC_Q6SS_DBG_ARES] = { 0x59110, 0 },
+       [GCC_Q6_AHB_S_ARES] = { 0x59110, 1 },
+       [GCC_Q6_AHB_ARES] = { 0x59110, 2 },
+       [GCC_Q6_AXIM2_ARES] = { 0x59110, 3 },
+       [GCC_Q6_AXIM_ARES] = { 0x59110, 4 },
+};
+
+static const struct of_device_id gcc_ipq6018_match_table[] = {
+       { .compatible = "qcom,gcc-ipq6018" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq6018_match_table);
+
+static const struct regmap_config gcc_ipq6018_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x7fffc,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc gcc_ipq6018_desc = {
+       .config = &gcc_ipq6018_regmap_config,
+       .clks = gcc_ipq6018_clks,
+       .num_clks = ARRAY_SIZE(gcc_ipq6018_clks),
+       .resets = gcc_ipq6018_resets,
+       .num_resets = ARRAY_SIZE(gcc_ipq6018_resets),
+       .clk_hws = gcc_ipq6018_hws,
+       .num_clk_hws = ARRAY_SIZE(gcc_ipq6018_hws),
+};
+
+static int gcc_ipq6018_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &gcc_ipq6018_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* Disable SW_COLLAPSE for USB0 GDSCR */
+       regmap_update_bits(regmap, 0x3e078, BIT(0), 0x0);
+       /* Enable SW_OVERRIDE for USB0 GDSCR */
+       regmap_update_bits(regmap, 0x3e078, BIT(2), BIT(2));
+       /* Disable SW_COLLAPSE for USB1 GDSCR */
+       regmap_update_bits(regmap, 0x3f078, BIT(0), 0x0);
+       /* Enable SW_OVERRIDE for USB1 GDSCR */
+       regmap_update_bits(regmap, 0x3f078, BIT(2), BIT(2));
+
+       /* SW workaround for the UBI Huayra PLL */
+       regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26));
+
+       clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+
+       clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+                               &nss_crypto_pll_config);
+
+       return qcom_cc_really_probe(pdev, &gcc_ipq6018_desc, regmap);
+}
+
+static struct platform_driver gcc_ipq6018_driver = {
+       .probe = gcc_ipq6018_probe,
+       .driver = {
+               .name   = "qcom,gcc-ipq6018",
+               .of_match_table = gcc_ipq6018_match_table,
+       },
+};
+
+static int __init gcc_ipq6018_init(void)
+{
+       return platform_driver_register(&gcc_ipq6018_driver);
+}
+core_initcall(gcc_ipq6018_init);
+
+static void __exit gcc_ipq6018_exit(void)
+{
+       platform_driver_unregister(&gcc_ipq6018_driver);
+}
+module_exit(gcc_ipq6018_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GCC IPQ6018 Driver");
+MODULE_LICENSE("GPL v2");
index d004cda..3c3a7ff 100644 (file)
@@ -3046,7 +3046,10 @@ static struct clk_branch gcc_usb3_clkref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb3_clkref_clk",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo2",
+                               .name = "xo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3060,7 +3063,10 @@ static struct clk_branch gcc_hdmi_clkref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_hdmi_clkref_clk",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo2",
+                               .name = "xo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3074,7 +3080,10 @@ static struct clk_branch gcc_edp_clkref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_edp_clkref_clk",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo2",
+                               .name = "xo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3088,7 +3097,10 @@ static struct clk_branch gcc_ufs_clkref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_clkref_clk",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo2",
+                               .name = "xo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3102,7 +3114,10 @@ static struct clk_branch gcc_pcie_clkref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_clkref_clk",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo2",
+                               .name = "xo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3116,7 +3131,10 @@ static struct clk_branch gcc_rx2_usb2_clkref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_rx2_usb2_clkref_clk",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo2",
+                               .name = "xo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3130,7 +3148,10 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_rx1_usb2_clkref_clk",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo2",
+                               .name = "xo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
index cf31b5d..df1d705 100644 (file)
@@ -1996,6 +1996,19 @@ static struct clk_branch gcc_gp3_clk = {
        },
 };
 
+static struct clk_branch gcc_bimc_gfx_clk = {
+       .halt_reg = 0x46040,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x46040,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_bimc_gfx_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
 static struct clk_branch gcc_gpu_bimc_gfx_clk = {
        .halt_reg = 0x71010,
        .halt_check = BRANCH_HALT,
@@ -2810,6 +2823,7 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
        [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
        [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
        [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+       [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
        [GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
        [GCC_GPU_BIMC_GFX_SRC_CLK] = &gcc_gpu_bimc_gfx_src_clk.clkr,
        [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
index 9b0c4ce..46d314d 100644 (file)
@@ -330,7 +330,7 @@ static struct clk_alpha_pll gpll0_ao_out_main = {
                        .parent_names = (const char *[]){ "cxo" },
                        .num_parents = 1,
                        .flags = CLK_IS_CRITICAL,
-                       .ops = &clk_alpha_pll_ops,
+                       .ops = &clk_alpha_pll_fixed_ops,
                },
        },
 };
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
new file mode 100644 (file)
index 0000000..ec61194
--- /dev/null
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK         0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT                4
+#define CX_GMU_CBCR_WAKE_MASK          0xF
+#define CX_GMU_CBCR_WAKE_SHIFT         8
+#define CLK_DIS_WAIT_SHIFT             12
+#define CLK_DIS_WAIT_MASK              (0xf << CLK_DIS_WAIT_SHIFT)
+
+enum {
+       P_BI_TCXO,
+       P_CORE_BI_PLL_TEST_SE,
+       P_GPLL0_OUT_MAIN,
+       P_GPLL0_OUT_MAIN_DIV,
+       P_GPU_CC_PLL1_OUT_EVEN,
+       P_GPU_CC_PLL1_OUT_MAIN,
+       P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static const struct pll_vco fabia_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+       .offset = 0x100,
+       .vco_table = fabia_vco,
+       .num_vco = ARRAY_SIZE(fabia_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_pll1",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fabia_ops,
+               },
+       },
+};
+
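+/*
+ * Each parent_map entry pairs a parent selector with its hardware mux value;
+ * the clk_parent_data array below lists the same parents in the same order.
+ */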
+static const struct parent_map gpu_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+       { P_GPLL0_OUT_MAIN, 5 },
+       { P_GPLL0_OUT_MAIN_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &gpu_cc_pll1.clkr.hw },
+       { .fw_name = "gcc_gpu_gpll0_clk_src" },
+       { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
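+/*
+ * Frequency table entries are F(rate, source, pre_div, m, n); the F() macro
+ * encodes pre_div as 2 * div - 1, so fractional dividers such as 1.5 work.
+ */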
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+       .cmd_rcgr = 0x1120,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = gpu_cc_parent_map_0,
+       .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpu_cc_gmu_clk_src",
+               .parent_data = gpu_cc_parent_data_0,
+               .num_parents = 5,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+       .halt_reg = 0x107c,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x107c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_crc_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+       .halt_reg = 0x1098,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1098,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_gmu_clk",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+       .halt_reg = 0x108c,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x108c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_snoc_dvm_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+       .halt_reg = 0x1004,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x1004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_aon_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+       .halt_reg = 0x109c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x109c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc cx_gdsc = {
+       .gdscr = 0x106c,
+       .gds_hw_ctrl = 0x1540,
+       .pd = {
+               .name = "cx_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
+};
+
+static struct gdsc *gpu_cc_sc7180_gdscs[] = {
+       [CX_GDSC] = &cx_gdsc,
+};
+
+static struct clk_regmap *gpu_cc_sc7180_clocks[] = {
+       [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+       [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+       [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+       [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+       [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+       [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+       [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+};
+
+static const struct regmap_config gpu_cc_sc7180_regmap_config = {
+       .reg_bits =     32,
+       .reg_stride =   4,
+       .val_bits =     32,
+       .max_register = 0x8008,
+       .fast_io =      true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sc7180_desc = {
+       .config = &gpu_cc_sc7180_regmap_config,
+       .clks = gpu_cc_sc7180_clocks,
+       .num_clks = ARRAY_SIZE(gpu_cc_sc7180_clocks),
+       .gdscs = gpu_cc_sc7180_gdscs,
+       .num_gdscs = ARRAY_SIZE(gpu_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sc7180_match_table[] = {
+       { .compatible = "qcom,sc7180-gpucc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sc7180_match_table);
+
+static int gpu_cc_sc7180_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+       struct alpha_pll_config gpu_cc_pll_config = {};
+       unsigned int value, mask;
+
+       regmap = qcom_cc_map(pdev, &gpu_cc_sc7180_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* 360MHz Configuration */
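+       /* L = 18 + alpha 0.75 on the 19.2 MHz bi_tcxo gives 360 MHz */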
+       gpu_cc_pll_config.l = 0x12;
+       gpu_cc_pll_config.alpha = 0xc000;
+       gpu_cc_pll_config.config_ctl_val = 0x20485699;
+       gpu_cc_pll_config.config_ctl_hi_val = 0x00002067;
+       gpu_cc_pll_config.user_ctl_val = 0x00000001;
+       gpu_cc_pll_config.user_ctl_hi_val = 0x00004805;
+       gpu_cc_pll_config.test_ctl_hi_val = 0x40000000;
+
+       clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll_config);
+
+       /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+       mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+       mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+       value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+       regmap_update_bits(regmap, 0x1098, mask, value);
+
+       /* Configure clk_dis_wait for gpu_cx_gdsc */
+       regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
+                                               8 << CLK_DIS_WAIT_SHIFT);
+
+       return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sc7180_driver = {
+       .probe = gpu_cc_sc7180_probe,
+       .driver = {
+               .name = "sc7180-gpucc",
+               .of_match_table = gpu_cc_sc7180_match_table,
+       },
+};
+
+static int __init gpu_cc_sc7180_init(void)
+{
+       return platform_driver_register(&gpu_cc_sc7180_driver);
+}
+subsys_initcall(gpu_cc_sc7180_init);
+
+static void __exit gpu_cc_sc7180_exit(void)
+{
+       platform_driver_unregister(&gpu_cc_sc7180_driver);
+}
+module_exit(gpu_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
index a6de710..5ff7f5a 100644 (file)
@@ -53,10 +53,18 @@ static int qcom_hfpll_probe(struct platform_device *pdev)
        struct regmap *regmap;
        struct clk_hfpll *h;
        struct clk_init_data init = {
-               .parent_names = (const char *[]){ "xo" },
                .num_parents = 1,
                .ops = &clk_ops_hfpll,
+               /*
+                * Rather than marking the clock critical and forcing it to be
+                * always enabled, make sure that the clock is not disabled:
+                * the firmware remains responsible for enabling this clock
+                * (see the commit log for more information).
+                */
+               .flags = CLK_IGNORE_UNUSED,
        };
+       int ret;
+       struct clk_parent_data pdata = { .index = 0 };
 
        h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
        if (!h)
@@ -75,11 +83,20 @@ static int qcom_hfpll_probe(struct platform_device *pdev)
                                          0, &init.name))
                return -ENODEV;
 
+       init.parent_data = &pdata;
+
        h->d = &hdata;
        h->clkr.hw.init = &init;
        spin_lock_init(&h->lock);
 
-       return devm_clk_register_regmap(&pdev->dev, &h->clkr);
+       ret = devm_clk_register_regmap(dev, &h->clkr);
+       if (ret) {
+               dev_err(dev, "failed to register regmap clock: %d\n", ret);
+               return ret;
+       }
+
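+       /* Expose the HFPLL to DT consumers via a single-clock provider */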
+       return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+                                          &h->clkr.hw);
 }
 
 static struct platform_driver qcom_hfpll_driver = {
index bcb0a39..0154262 100644 (file)
@@ -452,18 +452,6 @@ static struct clk_rcg2 mdp_clk_src = {
        },
 };
 
-static struct clk_rcg2 gfx3d_clk_src = {
-       .cmd_rcgr = 0x4000,
-       .hid_width = 5,
-       .parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
-       .clkr.hw.init = &(struct clk_init_data){
-               .name = "gfx3d_clk_src",
-               .parent_names = mmcc_xo_mmpll0_1_2_gpll0,
-               .num_parents = 5,
-               .ops = &clk_rcg2_ops,
-       },
-};
-
 static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
        F(75000000, P_GPLL0, 8, 0, 0),
        F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -2411,7 +2399,6 @@ static struct clk_regmap *mmcc_msm8974_clocks[] = {
        [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
        [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
        [MDP_CLK_SRC] = &mdp_clk_src.clkr,
-       [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
        [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
        [JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
        [JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
new file mode 100644 (file)
index 0000000..dd68983
--- /dev/null
@@ -0,0 +1,2913 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8998.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+       P_XO,
+       P_GPLL0,
+       P_GPLL0_DIV,
+       P_MMPLL0_OUT_EVEN,
+       P_MMPLL1_OUT_EVEN,
+       P_MMPLL3_OUT_EVEN,
+       P_MMPLL4_OUT_EVEN,
+       P_MMPLL5_OUT_EVEN,
+       P_MMPLL6_OUT_EVEN,
+       P_MMPLL7_OUT_EVEN,
+       P_MMPLL10_OUT_EVEN,
+       P_DSI0PLL,
+       P_DSI1PLL,
+       P_DSI0PLL_BYTE,
+       P_DSI1PLL_BYTE,
+       P_HDMIPLL,
+       P_DPVCO,
+       P_DPLINK,
+       P_CORE_BI_PLL_TEST_SE,
+};
+
+static struct clk_fixed_factor gpll0_div = {
+       .mult = 1,
+       .div = 2,
+       .hw.init = &(struct clk_init_data){
+               .name = "mmss_gpll0_div",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "gpll0",
+                       .name = "gpll0"
+               },
+               .num_parents = 1,
+               .ops = &clk_fixed_factor_ops,
+       },
+};
+
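+/* Fabia even post-divider: { register value, divide ratio } */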
+static const struct clk_div_table post_div_table_fabia_even[] = {
+       { 0x0, 1 },
+       { 0x1, 2 },
+       { 0x3, 4 },
+       { 0x7, 8 },
+       { }
+};
+
+static struct clk_alpha_pll mmpll0 = {
+       .offset = 0xc000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .enable_reg = 0x1e0,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmpll0",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                               .name = "xo"
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fixed_fabia_ops,
+               },
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll0_out_even = {
+       .offset = 0xc000,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll0_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll0.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll mmpll1 = {
+       .offset = 0xc050,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .enable_reg = 0x1e0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmpll1",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "xo",
+                               .name = "xo"
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fixed_fabia_ops,
+               },
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll1_out_even = {
+       .offset = 0xc050,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll1_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll1.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll mmpll3 = {
+       .offset = 0x0,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll3",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "xo",
+                       .name = "xo"
+               },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_fixed_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll3_out_even = {
+       .offset = 0x0,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll3_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll3.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll mmpll4 = {
+       .offset = 0x50,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll4",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "xo",
+                       .name = "xo"
+               },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_fixed_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll4_out_even = {
+       .offset = 0x50,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll4_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll4.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll mmpll5 = {
+       .offset = 0xa0,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll5",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "xo",
+                       .name = "xo"
+               },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_fixed_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll5_out_even = {
+       .offset = 0xa0,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll5_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll5.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll mmpll6 = {
+       .offset = 0xf0,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll6",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "xo",
+                       .name = "xo"
+               },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_fixed_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll6_out_even = {
+       .offset = 0xf0,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll6_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll6.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll mmpll7 = {
+       .offset = 0x140,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll7",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "xo",
+                       .name = "xo"
+               },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_fixed_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll7_out_even = {
+       .offset = 0x140,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll7_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll7.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll mmpll10 = {
+       .offset = 0x190,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll10",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "xo",
+                       .name = "xo"
+               },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_fixed_fabia_ops,
+       },
+};
+
+static struct clk_alpha_pll_postdiv mmpll10_out_even = {
+       .offset = 0x190,
+       .post_div_shift = 8,
+       .post_div_table = post_div_table_fabia_even,
+       .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll10_out_even",
+               .parent_hws = (const struct clk_hw *[]){ &mmpll10.clkr.hw },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static const struct parent_map mmss_xo_hdmi_map[] = {
+       { P_XO, 0 },
+       { P_HDMIPLL, 1 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_hdmi[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .fw_name = "hdmipll", .name = "hdmipll" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
+       { P_XO, 0 },
+       { P_DSI0PLL, 1 },
+       { P_DSI1PLL, 2 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .fw_name = "dsi0dsi", .name = "dsi0dsi" },
+       { .fw_name = "dsi1dsi", .name = "dsi1dsi" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dsibyte_map[] = {
+       { P_XO, 0 },
+       { P_DSI0PLL_BYTE, 1 },
+       { P_DSI1PLL_BYTE, 2 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dsibyte[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .fw_name = "dsi0byte", .name = "dsi0byte" },
+       { .fw_name = "dsi1byte", .name = "dsi1byte" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dp_map[] = {
+       { P_XO, 0 },
+       { P_DPLINK, 1 },
+       { P_DPVCO, 2 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dp[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .fw_name = "dplink", .name = "dplink" },
+       { .fw_name = "dpvco", .name = "dpvco" },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_MMPLL0_OUT_EVEN, 1 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .hw = &mmpll0_out_even.clkr.hw },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_MMPLL0_OUT_EVEN, 1 },
+       { P_MMPLL1_OUT_EVEN, 2 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .hw = &mmpll0_out_even.clkr.hw },
+       { .hw = &mmpll1_out_even.clkr.hw },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_MMPLL0_OUT_EVEN, 1 },
+       { P_MMPLL5_OUT_EVEN, 2 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .hw = &mmpll0_out_even.clkr.hw },
+       { .hw = &mmpll5_out_even.clkr.hw },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_MMPLL0_OUT_EVEN, 1 },
+       { P_MMPLL3_OUT_EVEN, 3 },
+       { P_MMPLL6_OUT_EVEN, 4 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .hw = &mmpll0_out_even.clkr.hw },
+       { .hw = &mmpll3_out_even.clkr.hw },
+       { .hw = &mmpll6_out_even.clkr.hw },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_MMPLL4_OUT_EVEN, 1 },
+       { P_MMPLL7_OUT_EVEN, 2 },
+       { P_MMPLL10_OUT_EVEN, 3 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .hw = &mmpll4_out_even.clkr.hw },
+       { .hw = &mmpll7_out_even.clkr.hw },
+       { .hw = &mmpll10_out_even.clkr.hw },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_MMPLL0_OUT_EVEN, 1 },
+       { P_MMPLL7_OUT_EVEN, 2 },
+       { P_MMPLL10_OUT_EVEN, 3 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .hw = &mmpll0_out_even.clkr.hw },
+       { .hw = &mmpll7_out_even.clkr.hw },
+       { .hw = &mmpll10_out_even.clkr.hw },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+       { P_XO, 0 },
+       { P_MMPLL0_OUT_EVEN, 1 },
+       { P_MMPLL4_OUT_EVEN, 2 },
+       { P_MMPLL7_OUT_EVEN, 3 },
+       { P_MMPLL10_OUT_EVEN, 4 },
+       { P_GPLL0, 5 },
+       { P_GPLL0_DIV, 6 },
+       { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+       { .fw_name = "xo", .name = "xo" },
+       { .hw = &mmpll0_out_even.clkr.hw },
+       { .hw = &mmpll4_out_even.clkr.hw },
+       { .hw = &mmpll7_out_even.clkr.hw },
+       { .hw = &mmpll10_out_even.clkr.hw },
+       { .fw_name = "gpll0", .name = "gpll0" },
+       { .hw = &gpll0_div.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+       .cmd_rcgr = 0x2120,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dsibyte_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "byte0_clk_src",
+               .parent_data = mmss_xo_dsibyte,
+               .num_parents = 4,
+               .ops = &clk_byte2_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+       .cmd_rcgr = 0x2140,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dsibyte_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "byte1_clk_src",
+               .parent_data = mmss_xo_dsibyte,
+               .num_parents = 4,
+               .ops = &clk_byte2_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+       .cmd_rcgr = 0x3300,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_cci_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "cci_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+       F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+       F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+       F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+       F(600000000, P_GPLL0, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+       .cmd_rcgr = 0x3640,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_cpp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "cpp_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_csi_clk_src[] = {
+       F(164571429, P_MMPLL10_OUT_EVEN, 3.5, 0, 0),
+       F(256000000, P_MMPLL4_OUT_EVEN, 3, 0, 0),
+       F(274290000, P_MMPLL7_OUT_EVEN, 3.5, 0, 0),
+       F(300000000, P_GPLL0, 2, 0, 0),
+       F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+       F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+       .cmd_rcgr = 0x3090,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csi_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi0_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+       .cmd_rcgr = 0x3100,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csi_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi1_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+       .cmd_rcgr = 0x3160,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csi_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi2_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+       .cmd_rcgr = 0x31c0,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csi_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi3_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_csiphy_clk_src[] = {
+       F(164571429, P_MMPLL10_OUT_EVEN, 3.5, 0, 0),
+       F(256000000, P_MMPLL4_OUT_EVEN, 3, 0, 0),
+       F(274290000, P_MMPLL7_OUT_EVEN, 3.5, 0, 0),
+       F(300000000, P_GPLL0, 2, 0, 0),
+       F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 csiphy_clk_src = {
+       .cmd_rcgr = 0x3800,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csiphy_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csiphy_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_csiphytimer_clk_src[] = {
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(269333333, P_MMPLL0_OUT_EVEN, 3, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+       .cmd_rcgr = 0x3000,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csiphytimer_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi0phytimer_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+       .cmd_rcgr = 0x3030,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csiphytimer_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi1phytimer_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+       .cmd_rcgr = 0x3060,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_csiphytimer_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi2phytimer_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_dp_aux_clk_src[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 dp_aux_clk_src = {
+       .cmd_rcgr = 0x2260,
+       .hid_width = 5,
+       .parent_map = mmss_xo_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "dp_aux_clk_src",
+               .parent_data = mmss_xo_gpll0_gpll0_div,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_dp_crypto_clk_src[] = {
+       F(101250, P_DPLINK, 1, 5, 16),
+       F(168750, P_DPLINK, 1, 5, 16),
+       F(337500, P_DPLINK, 1, 5, 16),
+       { }
+};
+
+static struct clk_rcg2 dp_crypto_clk_src = {
+       .cmd_rcgr = 0x2220,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dp_map,
+       .freq_tbl = ftbl_dp_crypto_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "dp_crypto_clk_src",
+               .parent_data = mmss_xo_dp,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_dp_link_clk_src[] = {
+       F(162000, P_DPLINK, 2, 0, 0),
+       F(270000, P_DPLINK, 2, 0, 0),
+       F(540000, P_DPLINK, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 dp_link_clk_src = {
+       .cmd_rcgr = 0x2200,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dp_map,
+       .freq_tbl = ftbl_dp_link_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "dp_link_clk_src",
+               .parent_data = mmss_xo_dp,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_dp_pixel_clk_src[] = {
+       F(154000000, P_DPVCO, 1, 0, 0),
+       F(337500000, P_DPVCO, 2, 0, 0),
+       F(675000000, P_DPVCO, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 dp_pixel_clk_src = {
+       .cmd_rcgr = 0x2240,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dp_map,
+       .freq_tbl = ftbl_dp_pixel_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "dp_pixel_clk_src",
+               .parent_data = mmss_xo_dp,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_esc_clk_src[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+       .cmd_rcgr = 0x2160,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dsibyte_map,
+       .freq_tbl = ftbl_esc_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "esc0_clk_src",
+               .parent_data = mmss_xo_dsibyte,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+       .cmd_rcgr = 0x2180,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dsibyte_map,
+       .freq_tbl = ftbl_esc_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "esc1_clk_src",
+               .parent_data = mmss_xo_dsibyte,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_extpclk_clk_src[] = {
+       { .src = P_HDMIPLL },
+       { }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+       .cmd_rcgr = 0x2060,
+       .hid_width = 5,
+       .parent_map = mmss_xo_hdmi_map,
+       .freq_tbl = ftbl_extpclk_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "extpclk_clk_src",
+               .parent_data = mmss_xo_hdmi,
+               .num_parents = 3,
+               .ops = &clk_byte_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct freq_tbl ftbl_fd_core_clk_src[] = {
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+       F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+       F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 fd_core_clk_src = {
+       .cmd_rcgr = 0x3b00,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_fd_core_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "fd_core_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_hdmi_clk_src[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+       .cmd_rcgr = 0x2100,
+       .hid_width = 5,
+       .parent_map = mmss_xo_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_hdmi_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "hdmi_clk_src",
+               .parent_data = mmss_xo_gpll0_gpll0_div,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(320000000, P_MMPLL7_OUT_EVEN, 3, 0, 0),
+       F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+       .cmd_rcgr = 0x3500,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_jpeg0_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "jpeg0_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_maxi_clk_src[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(75000000, P_GPLL0_DIV, 4, 0, 0),
+       F(171428571, P_GPLL0, 3.5, 0, 0),
+       F(323200000, P_MMPLL0_OUT_EVEN, 2.5, 0, 0),
+       F(406000000, P_MMPLL1_OUT_EVEN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 maxi_clk_src = {
+       .cmd_rcgr = 0xf020,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_maxi_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "maxi_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
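+/*
+ * Example M/N entry: F(6000000, P_GPLL0_DIV, 10, 1, 5) divides gpll0_div
+ * (nominally 300 MHz) by 10 and applies M/N = 1/5 to reach 6 MHz.
+ */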
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+       F(4800000, P_XO, 4, 0, 0),
+       F(6000000, P_GPLL0_DIV, 10, 1, 5),
+       F(8000000, P_GPLL0_DIV, 1, 2, 75),
+       F(9600000, P_XO, 2, 0, 0),
+       F(16666667, P_GPLL0_DIV, 2, 1, 9),
+       F(19200000, P_XO, 1, 0, 0),
+       F(24000000, P_GPLL0_DIV, 1, 2, 25),
+       F(33333333, P_GPLL0_DIV, 1, 2, 9),
+       F(48000000, P_GPLL0, 1, 2, 25),
+       F(66666667, P_GPLL0, 1, 2, 9),
+       { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+       .cmd_rcgr = 0x3360,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_mclk_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk0_clk_src",
+               .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+       .cmd_rcgr = 0x3390,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_mclk_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk1_clk_src",
+               .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+       .cmd_rcgr = 0x33c0,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_mclk_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk2_clk_src",
+               .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+       .cmd_rcgr = 0x33f0,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_mclk_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk3_clk_src",
+               .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+       F(85714286, P_GPLL0, 7, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(171428571, P_GPLL0, 3.5, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(275000000, P_MMPLL5_OUT_EVEN, 3, 0, 0),
+       F(300000000, P_GPLL0, 2, 0, 0),
+       F(330000000, P_MMPLL5_OUT_EVEN, 2.5, 0, 0),
+       F(412500000, P_MMPLL5_OUT_EVEN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+       .cmd_rcgr = 0x2040,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_mdp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mdp_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+       .cmd_rcgr = 0x2080,
+       .hid_width = 5,
+       .parent_map = mmss_xo_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_vsync_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vsync_clk_src",
+               .parent_data = mmss_xo_gpll0_gpll0_div,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
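+/*
+ * ahb_clk_src is the MMSS register/AHB bus clock; nearly all of the
+ * *_ahb_clk branches further down reuse it as their parent.
+ */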
+static const struct freq_tbl ftbl_ahb_clk_src[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(40000000, P_GPLL0, 15, 0, 0),
+       F(80800000, P_MMPLL0_OUT_EVEN, 10, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 ahb_clk_src = {
+       .cmd_rcgr = 0x5000,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_ahb_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ahb_clk_src",
+               .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_axi_clk_src[] = {
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(171428571, P_GPLL0, 3.5, 0, 0),
+       F(240000000, P_GPLL0, 2.5, 0, 0),
+       F(323200000, P_MMPLL0_OUT_EVEN, 2.5, 0, 0),
+       F(406000000, P_MMPLL1_OUT_EVEN, 2, 0, 0),
+       { }
+};
+
+/* Read-only (RO) to Linux */
+static struct clk_rcg2 axi_clk_src = {
+       .cmd_rcgr = 0xd000,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_axi_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "axi_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
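+/*
+ * The DSI pixel clock RCGs carry no frequency table: clk_pixel_ops derives
+ * the M/N fraction at runtime, and CLK_SET_RATE_PARENT lets the display
+ * driver drive the DSI PHY PLL parents directly.
+ */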
+static struct clk_rcg2 pclk0_clk_src = {
+       .cmd_rcgr = 0x2000,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pclk0_clk_src",
+               .parent_data = mmss_xo_dsi0pll_dsi1pll,
+               .num_parents = 4,
+               .ops = &clk_pixel_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+       .cmd_rcgr = 0x2020,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pclk1_clk_src",
+               .parent_data = mmss_xo_dsi0pll_dsi1pll,
+               .num_parents = 4,
+               .ops = &clk_pixel_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static const struct freq_tbl ftbl_rot_clk_src[] = {
+       F(171428571, P_GPLL0, 3.5, 0, 0),
+       F(275000000, P_MMPLL5_OUT_EVEN, 3, 0, 0),
+       F(330000000, P_MMPLL5_OUT_EVEN, 2.5, 0, 0),
+       F(412500000, P_MMPLL5_OUT_EVEN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 rot_clk_src = {
+       .cmd_rcgr = 0x21a0,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_rot_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "rot_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_video_core_clk_src[] = {
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(269330000, P_MMPLL0_OUT_EVEN, 3, 0, 0),
+       F(355200000, P_MMPLL6_OUT_EVEN, 2.5, 0, 0),
+       F(444000000, P_MMPLL6_OUT_EVEN, 2, 0, 0),
+       F(533000000, P_MMPLL3_OUT_EVEN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 video_core_clk_src = {
+       .cmd_rcgr = 0x1000,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_video_core_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "video_core_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 video_subcore0_clk_src = {
+       .cmd_rcgr = 0x1060,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_video_core_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "video_subcore0_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 video_subcore1_clk_src = {
+       .cmd_rcgr = 0x1080,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_video_core_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "video_subcore1_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_vfe_clk_src[] = {
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(300000000, P_GPLL0, 2, 0, 0),
+       F(320000000, P_MMPLL7_OUT_EVEN, 3, 0, 0),
+       F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+       F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+       F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+       F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+       F(600000000, P_GPLL0, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+       .cmd_rcgr = 0x3600,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_vfe_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vfe0_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+       .cmd_rcgr = 0x3620,
+       .hid_width = 5,
+       .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+       .freq_tbl = ftbl_vfe_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vfe1_clk_src",
+               .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+               .num_parents = 8,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
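+/*
+ * Branch (gate) clocks: enable_reg/enable_mask gate the clock, and halt_reg
+ * is polled so clk_branch2_ops can verify that the clock actually started
+ * or stopped. Most branches set CLK_SET_RATE_PARENT so that rate requests
+ * are forwarded to the RCG above them.
+ */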
+static struct clk_branch misc_ahb_clk = {
+       .halt_reg = 0x328,
+       .clkr = {
+               .enable_reg = 0x328,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "misc_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch video_core_clk = {
+       .halt_reg = 0x1028,
+       .clkr = {
+               .enable_reg = 0x1028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_core_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &video_core_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch video_ahb_clk = {
+       .halt_reg = 0x1030,
+       .clkr = {
+               .enable_reg = 0x1030,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
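+/*
+ * The AXI-fed branches (video_axi, mdss_axi, jpeg_axi, the *_vbif_axi
+ * gates and bimc_smmu_axi) omit CLK_SET_RATE_PARENT, presumably because
+ * axi_clk_src is marked read-only to Linux above and rate requests should
+ * not propagate to it.
+ */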
+static struct clk_branch video_axi_clk = {
+       .halt_reg = 0x1034,
+       .clkr = {
+               .enable_reg = 0x1034,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch video_maxi_clk = {
+       .halt_reg = 0x1038,
+       .clkr = {
+               .enable_reg = 0x1038,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_maxi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch video_subcore0_clk = {
+       .halt_reg = 0x1048,
+       .clkr = {
+               .enable_reg = 0x1048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_subcore0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &video_subcore0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch video_subcore1_clk = {
+       .halt_reg = 0x104c,
+       .clkr = {
+               .enable_reg = 0x104c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_subcore1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &video_subcore1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
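+/*
+ * MDSS (display subsystem) gates: bus clocks plus the MDP, DSI
+ * byte/pixel/esc, HDMI and DisplayPort branches fed by the RCGs above.
+ */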
+static struct clk_branch mdss_ahb_clk = {
+       .halt_reg = 0x2308,
+       .clkr = {
+               .enable_reg = 0x2308,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_hdmi_dp_ahb_clk = {
+       .halt_reg = 0x230c,
+       .clkr = {
+               .enable_reg = 0x230c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_hdmi_dp_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_axi_clk = {
+       .halt_reg = 0x2310,
+       .clkr = {
+               .enable_reg = 0x2310,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+       .halt_reg = 0x2314,
+       .clkr = {
+               .enable_reg = 0x2314,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_pclk0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &pclk0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+       .halt_reg = 0x2318,
+       .clkr = {
+               .enable_reg = 0x2318,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_pclk1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &pclk1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+       .halt_reg = 0x231c,
+       .clkr = {
+               .enable_reg = 0x231c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_mdp_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_mdp_lut_clk = {
+       .halt_reg = 0x2320,
+       .clkr = {
+               .enable_reg = 0x2320,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_mdp_lut_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+       .halt_reg = 0x2324,
+       .clkr = {
+               .enable_reg = 0x2324,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_extpclk_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &extpclk_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+       .halt_reg = 0x2328,
+       .clkr = {
+               .enable_reg = 0x2328,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_vsync_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &vsync_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+       .halt_reg = 0x2338,
+       .clkr = {
+               .enable_reg = 0x2338,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_hdmi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &hdmi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+       .halt_reg = 0x233c,
+       .clkr = {
+               .enable_reg = 0x233c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_byte0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+       .halt_reg = 0x2340,
+       .clkr = {
+               .enable_reg = 0x2340,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_byte1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+       .halt_reg = 0x2344,
+       .clkr = {
+               .enable_reg = 0x2344,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_esc0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &esc0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+       .halt_reg = 0x2348,
+       .clkr = {
+               .enable_reg = 0x2348,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_esc1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &esc1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_rot_clk = {
+       .halt_reg = 0x2350,
+       .clkr = {
+               .enable_reg = 0x2350,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_rot_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &rot_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_dp_link_clk = {
+       .halt_reg = 0x2354,
+       .clkr = {
+               .enable_reg = 0x2354,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_dp_link_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_dp_link_intf_clk = {
+       .halt_reg = 0x2358,
+       .clkr = {
+               .enable_reg = 0x2358,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_dp_link_intf_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_dp_crypto_clk = {
+       .halt_reg = 0x235c,
+       .clkr = {
+               .enable_reg = 0x235c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_dp_crypto_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &dp_crypto_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_dp_pixel_clk = {
+       .halt_reg = 0x2360,
+       .clkr = {
+               .enable_reg = 0x2360,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_dp_pixel_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &dp_pixel_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_dp_aux_clk = {
+       .halt_reg = 0x2364,
+       .clkr = {
+               .enable_reg = 0x2364,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_dp_aux_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &dp_aux_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_byte0_intf_clk = {
+       .halt_reg = 0x2374,
+       .clkr = {
+               .enable_reg = 0x2374,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_byte0_intf_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mdss_byte1_intf_clk = {
+       .halt_reg = 0x2378,
+       .clkr = {
+               .enable_reg = 0x2378,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_byte1_intf_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
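+/*
+ * CAMSS (camera subsystem) gates: CSI PHY/PHY-timer, CSID pix/rdi, CCI,
+ * mclk, VFE, CPP and JPEG branches, plus their AHB/AXI bus clocks.
+ */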
+static struct clk_branch camss_csi0phytimer_clk = {
+       .halt_reg = 0x3024,
+       .clkr = {
+               .enable_reg = 0x3024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0phytimer_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi0phytimer_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1phytimer_clk = {
+       .halt_reg = 0x3054,
+       .clkr = {
+               .enable_reg = 0x3054,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1phytimer_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi1phytimer_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2phytimer_clk = {
+       .halt_reg = 0x3084,
+       .clkr = {
+               .enable_reg = 0x3084,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2phytimer_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi2phytimer_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0_clk = {
+       .halt_reg = 0x30b4,
+       .clkr = {
+               .enable_reg = 0x30b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+       .halt_reg = 0x30bc,
+       .clkr = {
+               .enable_reg = 0x30bc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+       .halt_reg = 0x30d4,
+       .clkr = {
+               .enable_reg = 0x30d4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0rdi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+       .halt_reg = 0x30e4,
+       .clkr = {
+               .enable_reg = 0x30e4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0pix_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1_clk = {
+       .halt_reg = 0x3124,
+       .clkr = {
+               .enable_reg = 0x3124,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+       .halt_reg = 0x3128,
+       .clkr = {
+               .enable_reg = 0x3128,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+       .halt_reg = 0x3144,
+       .clkr = {
+               .enable_reg = 0x3144,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1rdi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+       .halt_reg = 0x3154,
+       .clkr = {
+               .enable_reg = 0x3154,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1pix_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2_clk = {
+       .halt_reg = 0x3184,
+       .clkr = {
+               .enable_reg = 0x3184,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+       .halt_reg = 0x3188,
+       .clkr = {
+               .enable_reg = 0x3188,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+       .halt_reg = 0x31a4,
+       .clkr = {
+               .enable_reg = 0x31a4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2rdi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+       .halt_reg = 0x31b4,
+       .clkr = {
+               .enable_reg = 0x31b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2pix_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3_clk = {
+       .halt_reg = 0x31e4,
+       .clkr = {
+               .enable_reg = 0x31e4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+       .halt_reg = 0x31e8,
+       .clkr = {
+               .enable_reg = 0x31e8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+       .halt_reg = 0x3204,
+       .clkr = {
+               .enable_reg = 0x3204,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3rdi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+       .halt_reg = 0x3214,
+       .clkr = {
+               .enable_reg = 0x3214,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3pix_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+       .halt_reg = 0x3224,
+       .clkr = {
+               .enable_reg = 0x3224,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_ispif_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cci_clk = {
+       .halt_reg = 0x3344,
+       .clkr = {
+               .enable_reg = 0x3344,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cci_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &cci_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cci_ahb_clk = {
+       .halt_reg = 0x3348,
+       .clkr = {
+               .enable_reg = 0x3348,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cci_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+       .halt_reg = 0x3384,
+       .clkr = {
+               .enable_reg = 0x3384,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &mclk0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+       .halt_reg = 0x33b4,
+       .clkr = {
+               .enable_reg = 0x33b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &mclk1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+       .halt_reg = 0x33e4,
+       .clkr = {
+               .enable_reg = 0x33e4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk2_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &mclk2_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+       .halt_reg = 0x3414,
+       .clkr = {
+               .enable_reg = 0x3414,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk3_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &mclk3_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+       .halt_reg = 0x3484,
+       .clkr = {
+               .enable_reg = 0x3484,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_top_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_ahb_clk = {
+       .halt_reg = 0x348c,
+       .clkr = {
+               .enable_reg = 0x348c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+       .halt_reg = 0x3494,
+       .clkr = {
+               .enable_reg = 0x3494,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_micro_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg0_clk = {
+       .halt_reg = 0x35a8,
+       .clkr = {
+               .enable_reg = 0x35a8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &jpeg0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg_ahb_clk = {
+       .halt_reg = 0x35b4,
+       .clkr = {
+               .enable_reg = 0x35b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg_axi_clk = {
+       .halt_reg = 0x35b8,
+       .clkr = {
+               .enable_reg = 0x35b8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe0_ahb_clk = {
+       .halt_reg = 0x3668,
+       .clkr = {
+               .enable_reg = 0x3668,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe0_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe1_ahb_clk = {
+       .halt_reg = 0x3678,
+       .clkr = {
+               .enable_reg = 0x3678,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe1_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe0_clk = {
+       .halt_reg = 0x36a8,
+       .clkr = {
+               .enable_reg = 0x36a8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe1_clk = {
+       .halt_reg = 0x36ac,
+       .clkr = {
+               .enable_reg = 0x36ac,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cpp_clk = {
+       .halt_reg = 0x36b0,
+       .clkr = {
+               .enable_reg = 0x36b0,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cpp_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &cpp_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cpp_ahb_clk = {
+       .halt_reg = 0x36b4,
+       .clkr = {
+               .enable_reg = 0x36b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cpp_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_vbif_ahb_clk = {
+       .halt_reg = 0x36b8,
+       .clkr = {
+               .enable_reg = 0x36b8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_vbif_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_vbif_axi_clk = {
+       .halt_reg = 0x36bc,
+       .clkr = {
+               .enable_reg = 0x36bc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_vbif_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_cpp_axi_clk = {
+       .halt_reg = 0x36c4,
+       .clkr = {
+               .enable_reg = 0x36c4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cpp_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_cpp_vbif_ahb_clk = {
+       .halt_reg = 0x36c8,
+       .clkr = {
+               .enable_reg = 0x36c8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cpp_vbif_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+       .halt_reg = 0x3704,
+       .clkr = {
+               .enable_reg = 0x3704,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi_vfe0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+       .halt_reg = 0x3714,
+       .clkr = {
+               .enable_reg = 0x3714,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi_vfe1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe0_stream_clk = {
+       .halt_reg = 0x3720,
+       .clkr = {
+               .enable_reg = 0x3720,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe0_stream_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe1_stream_clk = {
+       .halt_reg = 0x3724,
+       .clkr = {
+               .enable_reg = 0x3724,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe1_stream_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cphy_csid0_clk = {
+       .halt_reg = 0x3730,
+       .clkr = {
+               .enable_reg = 0x3730,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cphy_csid0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cphy_csid1_clk = {
+       .halt_reg = 0x3734,
+       .clkr = {
+               .enable_reg = 0x3734,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cphy_csid1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cphy_csid2_clk = {
+       .halt_reg = 0x3738,
+       .clkr = {
+               .enable_reg = 0x3738,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cphy_csid2_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_cphy_csid3_clk = {
+       .halt_reg = 0x373c,
+       .clkr = {
+               .enable_reg = 0x373c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cphy_csid3_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csiphy0_clk = {
+       .halt_reg = 0x3740,
+       .clkr = {
+               .enable_reg = 0x3740,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csiphy0_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csiphy1_clk = {
+       .halt_reg = 0x3744,
+       .clkr = {
+               .enable_reg = 0x3744,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csiphy1_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch camss_csiphy2_clk = {
+       .halt_reg = 0x3748,
+       .clkr = {
+               .enable_reg = 0x3748,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csiphy2_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch fd_core_clk = {
+       .halt_reg = 0x3b68,
+       .clkr = {
+               .enable_reg = 0x3b68,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "fd_core_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch fd_core_uar_clk = {
+       .halt_reg = 0x3b6c,
+       .clkr = {
+               .enable_reg = 0x3b6c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "fd_core_uar_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch fd_ahb_clk = {
+       .halt_reg = 0x3b74,
+       .clkr = {
+               .enable_reg = 0x3b74,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "fd_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch mnoc_ahb_clk = {
+       .halt_reg = 0x5024,
+       .clkr = {
+               .enable_reg = 0x5024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mnoc_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch bimc_smmu_ahb_clk = {
+       .halt_reg = 0xe004,
+       .clkr = {
+               .enable_reg = 0xe004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "bimc_smmu_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch bimc_smmu_axi_clk = {
+       .halt_reg = 0xe008,
+       .clkr = {
+               .enable_reg = 0xe008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "bimc_smmu_axi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mnoc_maxi_clk = {
+       .halt_reg = 0xf004,
+       .clkr = {
+               .enable_reg = 0xf004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mnoc_maxi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch vmem_maxi_clk = {
+       .halt_reg = 0xf064,
+       .clkr = {
+               .enable_reg = 0xf064,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vmem_maxi_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch vmem_ahb_clk = {
+       .halt_reg = 0xf068,
+       .clkr = {
+               .enable_reg = 0xf068,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vmem_ahb_clk",
+                       .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
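+/*
+ * Bare clk_hw clocks (here only the gpll0 divider) registered in addition
+ * to the regmap-backed clocks below.
+ */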
+static struct clk_hw *mmcc_msm8998_hws[] = {
+       &gpll0_div.hw,
+};
+
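+/*
+ * GDSCs (globally distributed switch controllers) model the MMSS power
+ * domains; gdscr is the control register and .parent expresses the genpd
+ * hierarchy (e.g. the video subcores sit under video_top).
+ */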
+static struct gdsc video_top_gdsc = {
+       .gdscr = 0x1024,
+       .pd = {
+               .name = "video_top",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc video_subcore0_gdsc = {
+       .gdscr = 0x1040,
+       .pd = {
+               .name = "video_subcore0",
+       },
+       .parent = &video_top_gdsc.pd,
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc video_subcore1_gdsc = {
+       .gdscr = 0x1044,
+       .pd = {
+               .name = "video_subcore1",
+       },
+       .parent = &video_top_gdsc.pd,
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+       .gdscr = 0x2304,
+       .cxcs = (unsigned int []){ 0x2310, 0x2350, 0x231c, 0x2320 },
+       .cxc_count = 4,
+       .pd = {
+               .name = "mdss",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+       .gdscr = 0x34a0,
+       .cxcs = (unsigned int []){ 0x35b8, 0x36c4, 0x3704, 0x3714, 0x3494,
+                                  0x35a8, 0x3868 },
+       .cxc_count = 7,
+       .pd = {
+               .name = "camss_top",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe0_gdsc = {
+       .gdscr = 0x3664,
+       .pd = {
+               .name = "camss_vfe0",
+       },
+       .parent = &camss_top_gdsc.pd,
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe1_gdsc = {
+       .gdscr = 0x3674,
+       .pd = {
+               .name = "camss_vfe1_gdsc",
+       },
+       .parent = &camss_top_gdsc.pd,
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_cpp_gdsc = {
+       .gdscr = 0x36d4,
+       .pd = {
+               .name = "camss_cpp",
+       },
+       .parent = &camss_top_gdsc.pd,
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc bimc_smmu_gdsc = {
+       .gdscr = 0xe020,
+       .gds_hw_ctrl = 0xe024,
+       .pd = {
+               .name = "bimc_smmu",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = HW_CTRL,
+};
+
+static struct clk_regmap *mmcc_msm8998_clocks[] = {
+       [MMPLL0] = &mmpll0.clkr,
+       [MMPLL0_OUT_EVEN] = &mmpll0_out_even.clkr,
+       [MMPLL1] = &mmpll1.clkr,
+       [MMPLL1_OUT_EVEN] = &mmpll1_out_even.clkr,
+       [MMPLL3] = &mmpll3.clkr,
+       [MMPLL3_OUT_EVEN] = &mmpll3_out_even.clkr,
+       [MMPLL4] = &mmpll4.clkr,
+       [MMPLL4_OUT_EVEN] = &mmpll4_out_even.clkr,
+       [MMPLL5] = &mmpll5.clkr,
+       [MMPLL5_OUT_EVEN] = &mmpll5_out_even.clkr,
+       [MMPLL6] = &mmpll6.clkr,
+       [MMPLL6_OUT_EVEN] = &mmpll6_out_even.clkr,
+       [MMPLL7] = &mmpll7.clkr,
+       [MMPLL7_OUT_EVEN] = &mmpll7_out_even.clkr,
+       [MMPLL10] = &mmpll10.clkr,
+       [MMPLL10_OUT_EVEN] = &mmpll10_out_even.clkr,
+       [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+       [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+       [CCI_CLK_SRC] = &cci_clk_src.clkr,
+       [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+       [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+       [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+       [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+       [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+       [CSIPHY_CLK_SRC] = &csiphy_clk_src.clkr,
+       [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+       [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+       [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+       [DP_AUX_CLK_SRC] = &dp_aux_clk_src.clkr,
+       [DP_CRYPTO_CLK_SRC] = &dp_crypto_clk_src.clkr,
+       [DP_LINK_CLK_SRC] = &dp_link_clk_src.clkr,
+       [DP_PIXEL_CLK_SRC] = &dp_pixel_clk_src.clkr,
+       [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+       [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+       [EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+       [FD_CORE_CLK_SRC] = &fd_core_clk_src.clkr,
+       [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+       [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+       [MAXI_CLK_SRC] = &maxi_clk_src.clkr,
+       [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+       [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+       [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+       [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+       [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+       [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+       [AHB_CLK_SRC] = &ahb_clk_src.clkr,
+       [AXI_CLK_SRC] = &axi_clk_src.clkr,
+       [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+       [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+       [ROT_CLK_SRC] = &rot_clk_src.clkr,
+       [VIDEO_CORE_CLK_SRC] = &video_core_clk_src.clkr,
+       [VIDEO_SUBCORE0_CLK_SRC] = &video_subcore0_clk_src.clkr,
+       [VIDEO_SUBCORE1_CLK_SRC] = &video_subcore1_clk_src.clkr,
+       [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+       [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+       [MISC_AHB_CLK] = &misc_ahb_clk.clkr,
+       [VIDEO_CORE_CLK] = &video_core_clk.clkr,
+       [VIDEO_AHB_CLK] = &video_ahb_clk.clkr,
+       [VIDEO_AXI_CLK] = &video_axi_clk.clkr,
+       [VIDEO_MAXI_CLK] = &video_maxi_clk.clkr,
+       [VIDEO_SUBCORE0_CLK] = &video_subcore0_clk.clkr,
+       [VIDEO_SUBCORE1_CLK] = &video_subcore1_clk.clkr,
+       [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+       [MDSS_HDMI_DP_AHB_CLK] = &mdss_hdmi_dp_ahb_clk.clkr,
+       [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+       [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+       [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+       [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+       [MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+       [MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+       [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+       [MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+       [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+       [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+       [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+       [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+       [MDSS_ROT_CLK] = &mdss_rot_clk.clkr,
+       [MDSS_DP_LINK_CLK] = &mdss_dp_link_clk.clkr,
+       [MDSS_DP_LINK_INTF_CLK] = &mdss_dp_link_intf_clk.clkr,
+       [MDSS_DP_CRYPTO_CLK] = &mdss_dp_crypto_clk.clkr,
+       [MDSS_DP_PIXEL_CLK] = &mdss_dp_pixel_clk.clkr,
+       [MDSS_DP_AUX_CLK] = &mdss_dp_aux_clk.clkr,
+       [MDSS_BYTE0_INTF_CLK] = &mdss_byte0_intf_clk.clkr,
+       [MDSS_BYTE1_INTF_CLK] = &mdss_byte1_intf_clk.clkr,
+       [CAMSS_CSI0PHYTIMER_CLK] = &camss_csi0phytimer_clk.clkr,
+       [CAMSS_CSI1PHYTIMER_CLK] = &camss_csi1phytimer_clk.clkr,
+       [CAMSS_CSI2PHYTIMER_CLK] = &camss_csi2phytimer_clk.clkr,
+       [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+       [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+       [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+       [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+       [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+       [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+       [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+       [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+       [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+       [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+       [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+       [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+       [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+       [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+       [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+       [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+       [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+       [CAMSS_CCI_CLK] = &camss_cci_clk.clkr,
+       [CAMSS_CCI_AHB_CLK] = &camss_cci_ahb_clk.clkr,
+       [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+       [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+       [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+       [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+       [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+       [CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+       [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+       [CAMSS_JPEG0_CLK] = &camss_jpeg0_clk.clkr,
+       [CAMSS_JPEG_AHB_CLK] = &camss_jpeg_ahb_clk.clkr,
+       [CAMSS_JPEG_AXI_CLK] = &camss_jpeg_axi_clk.clkr,
+       [CAMSS_VFE0_AHB_CLK] = &camss_vfe0_ahb_clk.clkr,
+       [CAMSS_VFE1_AHB_CLK] = &camss_vfe1_ahb_clk.clkr,
+       [CAMSS_VFE0_CLK] = &camss_vfe0_clk.clkr,
+       [CAMSS_VFE1_CLK] = &camss_vfe1_clk.clkr,
+       [CAMSS_CPP_CLK] = &camss_cpp_clk.clkr,
+       [CAMSS_CPP_AHB_CLK] = &camss_cpp_ahb_clk.clkr,
+       [CAMSS_VFE_VBIF_AHB_CLK] = &camss_vfe_vbif_ahb_clk.clkr,
+       [CAMSS_VFE_VBIF_AXI_CLK] = &camss_vfe_vbif_axi_clk.clkr,
+       [CAMSS_CPP_AXI_CLK] = &camss_cpp_axi_clk.clkr,
+       [CAMSS_CPP_VBIF_AHB_CLK] = &camss_cpp_vbif_ahb_clk.clkr,
+       [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+       [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+       [CAMSS_VFE0_STREAM_CLK] = &camss_vfe0_stream_clk.clkr,
+       [CAMSS_VFE1_STREAM_CLK] = &camss_vfe1_stream_clk.clkr,
+       [CAMSS_CPHY_CSID0_CLK] = &camss_cphy_csid0_clk.clkr,
+       [CAMSS_CPHY_CSID1_CLK] = &camss_cphy_csid1_clk.clkr,
+       [CAMSS_CPHY_CSID2_CLK] = &camss_cphy_csid2_clk.clkr,
+       [CAMSS_CPHY_CSID3_CLK] = &camss_cphy_csid3_clk.clkr,
+       [CAMSS_CSIPHY0_CLK] = &camss_csiphy0_clk.clkr,
+       [CAMSS_CSIPHY1_CLK] = &camss_csiphy1_clk.clkr,
+       [CAMSS_CSIPHY2_CLK] = &camss_csiphy2_clk.clkr,
+       [FD_CORE_CLK] = &fd_core_clk.clkr,
+       [FD_CORE_UAR_CLK] = &fd_core_uar_clk.clkr,
+       [FD_AHB_CLK] = &fd_ahb_clk.clkr,
+       [MNOC_AHB_CLK] = &mnoc_ahb_clk.clkr,
+       [BIMC_SMMU_AHB_CLK] = &bimc_smmu_ahb_clk.clkr,
+       [BIMC_SMMU_AXI_CLK] = &bimc_smmu_axi_clk.clkr,
+       [MNOC_MAXI_CLK] = &mnoc_maxi_clk.clkr,
+       [VMEM_MAXI_CLK] = &vmem_maxi_clk.clkr,
+       [VMEM_AHB_CLK] = &vmem_ahb_clk.clkr,
+};
+
+static struct gdsc *mmcc_msm8998_gdscs[] = {
+       [VIDEO_TOP_GDSC] = &video_top_gdsc,
+       [VIDEO_SUBCORE0_GDSC] = &video_subcore0_gdsc,
+       [VIDEO_SUBCORE1_GDSC] = &video_subcore1_gdsc,
+       [MDSS_GDSC] = &mdss_gdsc,
+       [CAMSS_TOP_GDSC] = &camss_top_gdsc,
+       [CAMSS_VFE0_GDSC] = &camss_vfe0_gdsc,
+       [CAMSS_VFE1_GDSC] = &camss_vfe1_gdsc,
+       [CAMSS_CPP_GDSC] = &camss_cpp_gdsc,
+       [BIMC_SMMU_GDSC] = &bimc_smmu_gdsc,
+};
+
+static const struct qcom_reset_map mmcc_msm8998_resets[] = {
+       [SPDM_BCR] = { 0x200 },
+       [SPDM_RM_BCR] = { 0x300 },
+       [MISC_BCR] = { 0x320 },
+       [VIDEO_TOP_BCR] = { 0x1020 },
+       [THROTTLE_VIDEO_BCR] = { 0x1180 },
+       [MDSS_BCR] = { 0x2300 },
+       [THROTTLE_MDSS_BCR] = { 0x2460 },
+       [CAMSS_PHY0_BCR] = { 0x3020 },
+       [CAMSS_PHY1_BCR] = { 0x3050 },
+       [CAMSS_PHY2_BCR] = { 0x3080 },
+       [CAMSS_CSI0_BCR] = { 0x30b0 },
+       [CAMSS_CSI0RDI_BCR] = { 0x30d0 },
+       [CAMSS_CSI0PIX_BCR] = { 0x30e0 },
+       [CAMSS_CSI1_BCR] = { 0x3120 },
+       [CAMSS_CSI1RDI_BCR] = { 0x3140 },
+       [CAMSS_CSI1PIX_BCR] = { 0x3150 },
+       [CAMSS_CSI2_BCR] = { 0x3180 },
+       [CAMSS_CSI2RDI_BCR] = { 0x31a0 },
+       [CAMSS_CSI2PIX_BCR] = { 0x31b0 },
+       [CAMSS_CSI3_BCR] = { 0x31e0 },
+       [CAMSS_CSI3RDI_BCR] = { 0x3200 },
+       [CAMSS_CSI3PIX_BCR] = { 0x3210 },
+       [CAMSS_ISPIF_BCR] = { 0x3220 },
+       [CAMSS_CCI_BCR] = { 0x3340 },
+       [CAMSS_TOP_BCR] = { 0x3480 },
+       [CAMSS_AHB_BCR] = { 0x3488 },
+       [CAMSS_MICRO_BCR] = { 0x3490 },
+       [CAMSS_JPEG_BCR] = { 0x35a0 },
+       [CAMSS_VFE0_BCR] = { 0x3660 },
+       [CAMSS_VFE1_BCR] = { 0x3670 },
+       [CAMSS_VFE_VBIF_BCR] = { 0x36a0 },
+       [CAMSS_CPP_TOP_BCR] = { 0x36c0 },
+       [CAMSS_CPP_BCR] = { 0x36d0 },
+       [CAMSS_CSI_VFE0_BCR] = { 0x3700 },
+       [CAMSS_CSI_VFE1_BCR] = { 0x3710 },
+       [CAMSS_FD_BCR] = { 0x3b60 },
+       [THROTTLE_CAMSS_BCR] = { 0x3c30 },
+       [MNOCAHB_BCR] = { 0x5020 },
+       [MNOCAXI_BCR] = { 0xd020 },
+       [BMIC_SMMU_BCR] = { 0xe000 },
+       [MNOC_MAXI_BCR] = { 0xf000 },
+       [VMEM_BCR] = { 0xf060 },
+       [BTO_BCR] = { 0x10004 },
+};
+
+static const struct regmap_config mmcc_msm8998_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x10004,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc mmcc_msm8998_desc = {
+       .config = &mmcc_msm8998_regmap_config,
+       .clks = mmcc_msm8998_clocks,
+       .num_clks = ARRAY_SIZE(mmcc_msm8998_clocks),
+       .resets = mmcc_msm8998_resets,
+       .num_resets = ARRAY_SIZE(mmcc_msm8998_resets),
+       .gdscs = mmcc_msm8998_gdscs,
+       .num_gdscs = ARRAY_SIZE(mmcc_msm8998_gdscs),
+       .clk_hws = mmcc_msm8998_hws,
+       .num_clk_hws = ARRAY_SIZE(mmcc_msm8998_hws),
+};
+
+static const struct of_device_id mmcc_msm8998_match_table[] = {
+       { .compatible = "qcom,mmcc-msm8998" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8998_match_table);
+
+static int mmcc_msm8998_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &mmcc_msm8998_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       return qcom_cc_really_probe(pdev, &mmcc_msm8998_desc, regmap);
+}
+
+static struct platform_driver mmcc_msm8998_driver = {
+       .probe          = mmcc_msm8998_probe,
+       .driver         = {
+               .name   = "mmcc-msm8998",
+               .of_match_table = mmcc_msm8998_match_table,
+       },
+};
+module_platform_driver(mmcc_msm8998_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8998 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
new file mode 100644 (file)
index 0000000..76add30
--- /dev/null
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+       P_BI_TCXO,
+       P_CHIP_SLEEP_CLK,
+       P_CORE_BI_PLL_TEST_SE,
+       P_VIDEO_PLL0_OUT_EVEN,
+       P_VIDEO_PLL0_OUT_MAIN,
+       P_VIDEO_PLL0_OUT_ODD,
+};
+
+static const struct pll_vco fabia_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll video_pll0 = {
+       .offset = 0x42c,
+       .vco_table = fabia_vco,
+       .num_vco = ARRAY_SIZE(fabia_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_pll0",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fabia_ops,
+               },
+       },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+       { P_BI_TCXO, 0 },
+       { P_VIDEO_PLL0_OUT_MAIN, 1 },
+       { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &video_pll0.clkr.hw },
+       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(150000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+       F(270000000, P_VIDEO_PLL0_OUT_MAIN, 2.5, 0, 0),
+       F(340000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+       F(434000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+       F(500000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 video_cc_venus_clk_src = {
+       .cmd_rcgr = 0x7f0,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = video_cc_parent_map_1,
+       .freq_tbl = ftbl_video_cc_venus_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "video_cc_venus_clk_src",
+               .parent_data = video_cc_parent_data_1,
+               .num_parents = 3,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_branch video_cc_vcodec0_axi_clk = {
+       .halt_reg = 0x9ec,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x9ec,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_cc_vcodec0_axi_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch video_cc_vcodec0_core_clk = {
+       .halt_reg = 0x890,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x890,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_cc_vcodec0_core_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &video_cc_venus_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+       .halt_reg = 0xa4c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0xa4c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_cc_venus_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch video_cc_venus_ctl_axi_clk = {
+       .halt_reg = 0x9cc,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x9cc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_cc_venus_ctl_axi_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch video_cc_venus_ctl_core_clk = {
+       .halt_reg = 0x850,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x850,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "video_cc_venus_ctl_core_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &video_cc_venus_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc venus_gdsc = {
+       .gdscr = 0x814,
+       .pd = {
+               .name = "venus_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vcodec0_gdsc = {
+       .gdscr = 0x874,
+       .pd = {
+               .name = "vcodec0_gdsc",
+       },
+       .flags = HW_CTRL,
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *video_cc_sc7180_clocks[] = {
+       [VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
+       [VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr,
+       [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+       [VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr,
+       [VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr,
+       [VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr,
+       [VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_sc7180_gdscs[] = {
+       [VENUS_GDSC] = &venus_gdsc,
+       [VCODEC0_GDSC] = &vcodec0_gdsc,
+};
+
+static const struct regmap_config video_cc_sc7180_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = 0xb94,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sc7180_desc = {
+       .config = &video_cc_sc7180_regmap_config,
+       .clks = video_cc_sc7180_clocks,
+       .num_clks = ARRAY_SIZE(video_cc_sc7180_clocks),
+       .gdscs = video_cc_sc7180_gdscs,
+       .num_gdscs = ARRAY_SIZE(video_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id video_cc_sc7180_match_table[] = {
+       { .compatible = "qcom,sc7180-videocc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sc7180_match_table);
+
+static int video_cc_sc7180_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+       struct alpha_pll_config video_pll0_config = {};
+
+       regmap = qcom_cc_map(pdev, &video_cc_sc7180_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       video_pll0_config.l = 0x1f;
+       video_pll0_config.alpha = 0x4000;
+       video_pll0_config.user_ctl_val = 0x00000001;
+       video_pll0_config.user_ctl_hi_val = 0x00004805;
+
+       clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+       /* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
+       regmap_update_bits(regmap, 0x984, 0x1, 0x1);
+
+       return qcom_cc_really_probe(pdev, &video_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver video_cc_sc7180_driver = {
+       .probe = video_cc_sc7180_probe,
+       .driver = {
+               .name = "sc7180-videocc",
+               .of_match_table = video_cc_sc7180_match_table,
+       },
+};
+
+static int __init video_cc_sc7180_init(void)
+{
+       return platform_driver_register(&video_cc_sc7180_driver);
+}
+subsys_initcall(video_cc_sc7180_init);
+
+static void __exit video_cc_sc7180_exit(void)
+{
+       platform_driver_unregister(&video_cc_sc7180_driver);
+}
+module_exit(video_cc_sc7180_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI VIDEOCC SC7180 Driver");
index 4cd846b..250d816 100644 (file)
@@ -20,8 +20,8 @@ config CLK_RENESAS
        select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
        select CLK_R8A7792 if ARCH_R8A7792
        select CLK_R8A7794 if ARCH_R8A7794
-       select CLK_R8A7795 if ARCH_R8A7795
-       select CLK_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+       select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 || ARCH_R8A7795
+       select CLK_R8A77960 if ARCH_R8A77960
        select CLK_R8A77961 if ARCH_R8A77961
        select CLK_R8A77965 if ARCH_R8A77965
        select CLK_R8A77970 if ARCH_R8A77970
index cf65d4e..443bff0 100644 (file)
@@ -93,6 +93,7 @@ static const struct mssr_mod_clk r7s9210_mod_clks[] __initconst = {
        DEF_MOD_STB("ether1",    64,    R7S9210_CLK_B),
        DEF_MOD_STB("ether0",    65,    R7S9210_CLK_B),
 
+       DEF_MOD_STB("spibsc",    83,    R7S9210_CLK_P1),
        DEF_MOD_STB("i2c3",      84,    R7S9210_CLK_P1),
        DEF_MOD_STB("i2c2",      85,    R7S9210_CLK_P1),
        DEF_MOD_STB("i2c1",      86,    R7S9210_CLK_P1),
index db2f57e..bdcd4a3 100644 (file)
@@ -24,10 +24,10 @@ enum rcar_gen2_clk_types {
 };
 
 struct rcar_gen2_cpg_pll_config {
-       unsigned int extal_div;
-       unsigned int pll1_mult;
-       unsigned int pll3_mult;
-       unsigned int pll0_mult;         /* leave as zero if PLL0CR exists */
+       u8 extal_div;
+       u8 pll1_mult;
+       u8 pll3_mult;
+       u8 pll0_mult;           /* leave as zero if PLL0CR exists */
 };
 
 struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
index c97b647..488f8b3 100644 (file)
@@ -470,7 +470,8 @@ static struct clk * __init cpg_rpc_clk_register(const char *name,
 
        clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
                                     &rpc->div.hw,  &clk_divider_ops,
-                                    &rpc->gate.hw, &clk_gate_ops, 0);
+                                    &rpc->gate.hw, &clk_gate_ops,
+                                    CLK_SET_RATE_PARENT);
        if (IS_ERR(clk)) {
                kfree(rpc);
                return clk;
@@ -506,7 +507,8 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name,
 
        clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
                                     &rpcd2->fixed.hw, &clk_fixed_factor_ops,
-                                    &rpcd2->gate.hw, &clk_gate_ops, 0);
+                                    &rpcd2->gate.hw, &clk_gate_ops,
+                                    CLK_SET_RATE_PARENT);
        if (IS_ERR(clk))
                kfree(rpcd2);
 
index 198417d..10560d9 100644 (file)
@@ -282,7 +282,7 @@ static int rockchip_rk3036_pll_is_enabled(struct clk_hw *hw)
        return !(pllcon & RK3036_PLLCON1_PWRDOWN);
 }
 
-static void rockchip_rk3036_pll_init(struct clk_hw *hw)
+static int rockchip_rk3036_pll_init(struct clk_hw *hw)
 {
        struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
        const struct rockchip_pll_rate_table *rate;
@@ -290,14 +290,14 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
        unsigned long drate;
 
        if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
-               return;
+               return 0;
 
        drate = clk_hw_get_rate(hw);
        rate = rockchip_get_pll_settings(pll, drate);
 
        /* when no rate setting for the current rate, rely on clk_set_rate */
        if (!rate)
-               return;
+               return 0;
 
        rockchip_rk3036_pll_get_params(pll, &cur);
 
@@ -319,13 +319,15 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
                if (!parent) {
                        pr_warn("%s: parent of %s not available\n",
                                __func__, __clk_get_name(hw->clk));
-                       return;
+                       return 0;
                }
 
                pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
                         __func__, __clk_get_name(hw->clk));
                rockchip_rk3036_pll_set_params(pll, rate);
        }
+
+       return 0;
 }
 
 static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
@@ -515,7 +517,7 @@ static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
        return !(pllcon & RK3066_PLLCON3_PWRDOWN);
 }
 
-static void rockchip_rk3066_pll_init(struct clk_hw *hw)
+static int rockchip_rk3066_pll_init(struct clk_hw *hw)
 {
        struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
        const struct rockchip_pll_rate_table *rate;
@@ -523,14 +525,14 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
        unsigned long drate;
 
        if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
-               return;
+               return 0;
 
        drate = clk_hw_get_rate(hw);
        rate = rockchip_get_pll_settings(pll, drate);
 
        /* when no rate setting for the current rate, rely on clk_set_rate */
        if (!rate)
-               return;
+               return 0;
 
        rockchip_rk3066_pll_get_params(pll, &cur);
 
@@ -543,6 +545,8 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
                         __func__, clk_hw_get_name(hw));
                rockchip_rk3066_pll_set_params(pll, rate);
        }
+
+       return 0;
 }
 
 static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
@@ -761,7 +765,7 @@ static int rockchip_rk3399_pll_is_enabled(struct clk_hw *hw)
        return !(pllcon & RK3399_PLLCON3_PWRDOWN);
 }
 
-static void rockchip_rk3399_pll_init(struct clk_hw *hw)
+static int rockchip_rk3399_pll_init(struct clk_hw *hw)
 {
        struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
        const struct rockchip_pll_rate_table *rate;
@@ -769,14 +773,14 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
        unsigned long drate;
 
        if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
-               return;
+               return 0;
 
        drate = clk_hw_get_rate(hw);
        rate = rockchip_get_pll_settings(pll, drate);
 
        /* when no rate setting for the current rate, rely on clk_set_rate */
        if (!rate)
-               return;
+               return 0;
 
        rockchip_rk3399_pll_get_params(pll, &cur);
 
@@ -798,13 +802,15 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
                if (!parent) {
                        pr_warn("%s: parent of %s not available\n",
                                __func__, __clk_get_name(hw->clk));
-                       return;
+                       return 0;
                }
 
                pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
                         __func__, __clk_get_name(hw->clk));
                rockchip_rk3399_pll_set_params(pll, rate);
        }
+
+       return 0;
 }
 
 static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
index 49bd7a4..5f66bf8 100644 (file)
@@ -921,11 +921,26 @@ static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
        .num_resets     = ARRAY_SIZE(sun50i_a64_ccu_resets),
 };
 
+static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = {
+       .common = &pll_cpux_clk.common,
+       /* copy from pll_cpux_clk */
+       .enable = BIT(31),
+       .lock   = BIT(28),
+};
+
+static struct ccu_mux_nb sun50i_a64_cpu_nb = {
+       .common         = &cpux_clk.common,
+       .cm             = &cpux_clk.mux,
+       .delay_us       = 1, /* > 8 clock cycles at 24 MHz */
+       .bypass_index   = 1, /* index of 24 MHz oscillator */
+};
+
 static int sun50i_a64_ccu_probe(struct platform_device *pdev)
 {
        struct resource *res;
        void __iomem *reg;
        u32 val;
+       int ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg = devm_ioremap_resource(&pdev->dev, res);
@@ -939,7 +954,18 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
 
        writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
 
-       return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+       ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+       if (ret)
+               return ret;
+
+       /* Gate then ungate PLL CPU after any rate changes */
+       ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb);
+
+       /* Reparent CPU during PLL CPU rate changes */
+       ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+                                 &sun50i_a64_cpu_nb);
+
+       return 0;
 }
 
 static const struct of_device_id sun50i_a64_ccu_ids[] = {
index 9799292..116e6f8 100644 (file)
@@ -36,7 +36,6 @@
 #define CLK_PLL_HSIC                   18
 #define CLK_PLL_DE                     19
 #define CLK_PLL_DDR1                   20
-#define CLK_CPUX                       21
 #define CLK_AXI                                22
 #define CLK_APB                                23
 #define CLK_AHB1                       24
index a361388..3ed2a59 100644 (file)
@@ -32,7 +32,9 @@
 /* The PLL_VIDEO1_2X clock is exported */
 
 #define CLK_PLL_GPU            14
-#define CLK_PLL_MIPI           15
+
+/* The PLL_MIPI clock is exported */
+
 #define CLK_PLL9               16
 #define CLK_PLL10              17
 
index 72df692..5bf5c4d 100644 (file)
@@ -24,7 +24,9 @@
 #define CLK_PLL_PERIPH         10
 #define CLK_PLL_PERIPH_2X      11
 #define CLK_PLL_GPU            12
-#define CLK_PLL_MIPI           13
+
+/* The PLL MIPI clock is exported */
+
 #define CLK_PLL_HSIC           14
 #define CLK_PLL_DE             15
 #define CLK_PLL_DDR1           16
index a69637b..6f7071d 100644 (file)
 
 /* Some more module clocks are exported */
 
-#define CLK_MBUS               155
-
-/* Another bunch of module clocks are exported */
-
 #define CLK_NUMBER             (CLK_OUTB + 1)
 
 #endif /* _CCU_SUN8I_R40_H_ */
index a165e71..4c75b07 100644 (file)
@@ -37,7 +37,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct clk_onecell_data *clk_data;
-       const struct of_device_id *device;
        const struct gates_data *data;
        const char *clk_parent;
        const char *clk_name;
@@ -50,10 +49,9 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
        if (!np)
                return -ENODEV;
 
-       device = of_match_device(sun6i_a31_apb0_gates_clk_dt_ids, &pdev->dev);
-       if (!device)
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data)
                return -ENODEV;
-       data = device->data;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg = devm_ioremap_resource(&pdev->dev, r);
index c051d92..cfbaa90 100644 (file)
@@ -1487,7 +1487,6 @@ static int dfll_init(struct tegra_dfll *td)
        td->last_unrounded_rate = 0;
 
        pm_runtime_enable(td->dev);
-       pm_runtime_irq_safe(td->dev);
        pm_runtime_get_sync(td->dev);
 
        dfll_set_mode(td, DFLL_DISABLED);
@@ -1516,7 +1515,7 @@ di_err1:
 
 /**
  * tegra_dfll_suspend - check DFLL is disabled
- * @dev: DFLL device *
+ * @dev: DFLL instance
  *
  * DFLL clock should be disabled by the CPUFreq driver. So, make
  * sure it is disabled and disable all clocks needed by the DFLL.
index ca0de5f..38daf48 100644 (file)
@@ -40,8 +40,13 @@ static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
        int div, mul;
        u64 rate = parent_rate;
 
-       reg = readl_relaxed(divider->reg) >> divider->shift;
-       div = reg & div_mask(divider);
+       reg = readl_relaxed(divider->reg);
+
+       if ((divider->flags & TEGRA_DIVIDER_UART) &&
+           !(reg & PERIPH_CLK_UART_DIV_ENB))
+               return rate;
+
+       div = (reg >> divider->shift) & div_mask(divider);
 
        mul = get_mul(divider);
        div += mul;
index 0d07c0b..2b2a3b8 100644 (file)
@@ -777,7 +777,11 @@ static struct tegra_periph_init_data gate_clks[] = {
        GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
        GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
        GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
-       GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
+       /*
+        * Critical for RAM re-repair operation, which must occur on resume
+        * from LP1 system suspend and as part of CCPLEX cluster switching.
+        */
+       GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
        GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
        GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
        GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
index 4d8222f..fff5cba 100644 (file)
@@ -1046,11 +1046,9 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA20_CLK_SBC3, TEGRA20_CLK_PLL_P, 100000000, 0 },
        { TEGRA20_CLK_SBC4, TEGRA20_CLK_PLL_P, 100000000, 0 },
        { TEGRA20_CLK_HOST1X, TEGRA20_CLK_PLL_C, 150000000, 0 },
-       { TEGRA20_CLK_DISP1, TEGRA20_CLK_PLL_P, 600000000, 0 },
-       { TEGRA20_CLK_DISP2, TEGRA20_CLK_PLL_P, 600000000, 0 },
        { TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 },
        { TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 },
-       { TEGRA20_CLK_VDE, TEGRA20_CLK_CLK_MAX, 300000000, 0 },
+       { TEGRA20_CLK_VDE, TEGRA20_CLK_PLL_C, 300000000, 0 },
        /* must be the last entry */
        { TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
 };
index c8bc18e..b208914 100644 (file)
@@ -1251,14 +1251,12 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA30_CLK_SBC6, TEGRA30_CLK_PLL_P, 100000000, 0 },
        { TEGRA30_CLK_PLL_C, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
        { TEGRA30_CLK_HOST1X, TEGRA30_CLK_PLL_C, 150000000, 0 },
-       { TEGRA30_CLK_DISP1, TEGRA30_CLK_PLL_P, 600000000, 0 },
-       { TEGRA30_CLK_DISP2, TEGRA30_CLK_PLL_P, 600000000, 0 },
        { TEGRA30_CLK_TWD, TEGRA30_CLK_CLK_MAX, 0, 1 },
        { TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0 },
        { TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
        { TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
        { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
-       { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
+       { TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 600000000, 0 },
        { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
        { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
        { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
index c9608e5..14d98a8 100644 (file)
@@ -35,6 +35,20 @@ static const struct omap_clkctrl_reg_data omap5_dsp_clkctrl_regs[] __initconst =
        { 0 },
 };
 
+static const char * const omap5_aess_fclk_parents[] __initconst = {
+       "abe_clk",
+       NULL,
+};
+
+static const struct omap_clkctrl_div_data omap5_aess_fclk_data __initconst = {
+       .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
+       { 24, TI_CLK_DIVIDER, omap5_aess_fclk_parents, &omap5_aess_fclk_data },
+       { 0 },
+};
+
 static const char * const omap5_dmic_gfclk_parents[] __initconst = {
        "abe_cm:clk:0018:26",
        "pad_clks_ck",
@@ -122,6 +136,7 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst =
 
 static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
        { OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
+       { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
        { OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
        { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
        { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
index 5f46782..14b6450 100644 (file)
@@ -146,6 +146,29 @@ static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst =
        { 0 },
 };
 
+static const char * const dra7_cam_gfclk_mux_parents[] __initconst = {
+       "l3_iclk_div",
+       "core_iss_main_clk",
+       NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_cam_bit_data[] __initconst = {
+       { 24, TI_CLK_MUX, dra7_cam_gfclk_mux_parents, NULL },
+       { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_cam_clkctrl_regs[] __initconst = {
+       { DRA7_CAM_VIP1_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+       { DRA7_CAM_VIP2_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+       { DRA7_CAM_VIP3_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+       { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_vpe_clkctrl_regs[] __initconst = {
+       { DRA7_VPE_VPE_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h23x2_ck" },
+       { 0 },
+};
+
 static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
        { DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
        { DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
@@ -275,6 +298,40 @@ static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst =
        { 0 },
 };
 
+static const char * const dra7_gpu_core_mux_parents[] __initconst = {
+       "dpll_core_h14x2_ck",
+       "dpll_per_h14x2_ck",
+       "dpll_gpu_m2_ck",
+       NULL,
+};
+
+static const char * const dra7_gpu_hyd_mux_parents[] __initconst = {
+       "dpll_core_h14x2_ck",
+       "dpll_per_h14x2_ck",
+       "dpll_gpu_m2_ck",
+       NULL,
+};
+
+static const char * const dra7_gpu_sys_clk_parents[] __initconst = {
+       "sys_clkin",
+       NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_gpu_sys_clk_data __initconst = {
+       .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = {
+       { 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, },
+       { 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, },
+       { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = {
+       { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24", },
+       { 0 },
+};
+
 static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
        "func_128m_clk",
        "dpll_per_m2x2_ck",
@@ -405,7 +462,7 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
 };
 
 static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
-       { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" },
+       { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" },
        { 0 },
 };
 
@@ -769,6 +826,7 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
        { 0x4a005550, dra7_ipu_clkctrl_regs },
        { 0x4a005620, dra7_dsp2_clkctrl_regs },
        { 0x4a005720, dra7_rtc_clkctrl_regs },
+       { 0x4a005760, dra7_vpe_clkctrl_regs },
        { 0x4a008620, dra7_coreaon_clkctrl_regs },
        { 0x4a008720, dra7_l3main1_clkctrl_regs },
        { 0x4a008920, dra7_ipu2_clkctrl_regs },
@@ -777,7 +835,9 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
        { 0x4a008c00, dra7_atl_clkctrl_regs },
        { 0x4a008d20, dra7_l4cfg_clkctrl_regs },
        { 0x4a008e20, dra7_l3instr_clkctrl_regs },
+       { 0x4a009020, dra7_cam_clkctrl_regs },
        { 0x4a009120, dra7_dss_clkctrl_regs },
+       { 0x4a009220, dra7_gpu_clkctrl_regs },
        { 0x4a009320, dra7_l3init_clkctrl_regs },
        { 0x4a0093b0, dra7_pcie_clkctrl_regs },
        { 0x4a0093d0, dra7_gmac_clkctrl_regs },
index e0b8ed3..3da33c7 100644 (file)
@@ -171,7 +171,9 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
                node = of_find_node_by_name(NULL, buf);
                if (num_args && compat_mode) {
                        parent = node;
-                       node = of_get_child_by_name(parent, "clk");
+                       node = of_get_child_by_name(parent, "clock");
+                       if (!node)
+                               node = of_get_child_by_name(parent, "clk");
                        of_node_put(parent);
                }
 
index 17b9a76..0622660 100644 (file)
@@ -440,6 +440,63 @@ static void __init _clkctrl_add_provider(void *data,
        of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
 }
 
+/* Get clock name based on compatible string for clkctrl */
+static char * __init clkctrl_get_name(struct device_node *np)
+{
+       struct property *prop;
+       const int prefix_len = 11;
+       const char *compat;
+       char *name;
+
+       of_property_for_each_string(np, "compatible", prop, compat) {
+               if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
+                       /* Two letter minimum name length for l3, l4 etc */
+                       if (strnlen(compat + prefix_len, 16) < 2)
+                               continue;
+                       name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
+                       if (!name)
+                               continue;
+                       strreplace(name, '-', '_');
+
+                       return name;
+               }
+       }
+       of_node_put(np);
+
+       return NULL;
+}
+
+/* Get clkctrl clock base name based on clkctrl_name or dts node */
+static const char * __init clkctrl_get_clock_name(struct device_node *np,
+                                                 const char *clkctrl_name,
+                                                 int offset, int index,
+                                                 bool legacy_naming)
+{
+       char *clock_name;
+
+       /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
+       if (clkctrl_name && !legacy_naming) {
+               clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+                                      clkctrl_name, offset, index);
+               strreplace(clock_name, '_', '-');
+
+               return clock_name;
+       }
+
+       /* l4per:1234:0 old style naming based on clkctrl_name */
+       if (clkctrl_name)
+               return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
+                                clkctrl_name, offset, index);
+
+       /* l4per_cm:1234:0 old style naming based on parent node name */
+       if (legacy_naming)
+               return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
+                                np->parent, offset, index);
+
+       /* l4per-clkctrl:1234:0 style naming based on node name */
+       return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
+}
+
 static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 {
        struct omap_clkctrl_provider *provider;
@@ -448,8 +505,10 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *hw;
        struct clk *clk;
-       struct omap_clkctrl_clk *clkctrl_clk;
+       struct omap_clkctrl_clk *clkctrl_clk = NULL;
        const __be32 *addrp;
+       bool legacy_naming;
+       char *clkctrl_name;
        u32 addr;
        int ret;
        char *c;
@@ -537,7 +596,19 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 
        provider->base = of_iomap(node, 0);
 
-       if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
+       legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
+       clkctrl_name = clkctrl_get_name(node);
+       if (clkctrl_name) {
+               provider->clkdm_name = kasprintf(GFP_KERNEL,
+                                                "%s_clkdm", clkctrl_name);
+               goto clkdm_found;
+       }
+
+       /*
+        * The code below can be removed when all clkctrl nodes use domain
+        * specific compatible property and standard clock node naming
+        */
+       if (legacy_naming) {
                provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
                if (!provider->clkdm_name) {
                        kfree(provider);
@@ -573,7 +644,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
                        *c = '_';
                c++;
        }
-
+clkdm_found:
        INIT_LIST_HEAD(&provider->clocks);
 
        /* Generate clocks */
@@ -612,15 +683,15 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
                init.flags = 0;
                if (reg_data->flags & CLKF_SET_RATE_PARENT)
                        init.flags |= CLK_SET_RATE_PARENT;
-               if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-                       init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
-                                             node->parent, node,
-                                             reg_data->offset, 0);
-               else
-                       init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
-                                             node, reg_data->offset, 0);
+
+               init.name = clkctrl_get_clock_name(node, clkctrl_name,
+                                                  reg_data->offset, 0,
+                                                  legacy_naming);
+               if (!init.name)
+                       goto cleanup;
+
                clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
-               if (!init.name || !clkctrl_clk)
+               if (!clkctrl_clk)
                        goto cleanup;
 
                init.ops = &omap4_clkctrl_clk_ops;
@@ -642,11 +713,14 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
        if (ret == -EPROBE_DEFER)
                ti_clk_retry_init(node, provider, _clkctrl_add_provider);
 
+       kfree(clkctrl_name);
+
        return;
 
 cleanup:
        kfree(hw);
        kfree(init.name);
+       kfree(clkctrl_name);
        kfree(clkctrl_clk);
 }
 CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
index e6995c0..f1dd62d 100644 (file)
@@ -253,7 +253,7 @@ extern const struct clk_ops omap_gate_clk_ops;
 
 extern struct ti_clk_features ti_clk_features;
 
-void omap2_init_clk_clkdm(struct clk_hw *hw);
+int omap2_init_clk_clkdm(struct clk_hw *hw);
 int omap2_clkops_enable_clkdm(struct clk_hw *hw);
 void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 
index 423a99b..ee56306 100644 (file)
@@ -101,16 +101,16 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw)
  *
  * Convert a clockdomain name stored in a struct clk 'clk' into a
  * clockdomain pointer, and save it into the struct clk.  Intended to be
- * called during clk_register().  No return value.
+ * called during clk_register(). Returns 0 on success, -EERROR otherwise.
  */
-void omap2_init_clk_clkdm(struct clk_hw *hw)
+int omap2_init_clk_clkdm(struct clk_hw *hw)
 {
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        struct clockdomain *clkdm;
        const char *clk_name;
 
        if (!clk->clkdm_name)
-               return;
+               return 0;
 
        clk_name = __clk_get_name(hw->clk);
 
@@ -123,6 +123,8 @@ void omap2_init_clk_clkdm(struct clk_hw *hw)
                pr_debug("clock: could not associate clk %s to clkdm %s\n",
                         clk_name, clk->clkdm_name);
        }
+
+       return 0;
 }
 
 static void __init of_ti_clockdomain_setup(struct device_node *node)
index 9caa529..3e32db9 100644 (file)
@@ -18,8 +18,8 @@
 #define UNIPHIER_PERI_CLK_FI2C(idx, ch)                                        \
        UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
 
-#define UNIPHIER_PERI_CLK_SCSSI(idx)                                   \
-       UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
+#define UNIPHIER_PERI_CLK_SCSSI(idx, ch)                               \
+       UNIPHIER_CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch))
 
 #define UNIPHIER_PERI_CLK_MCSSI(idx)                                   \
        UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
@@ -35,7 +35,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
        UNIPHIER_PERI_CLK_I2C(6, 2),
        UNIPHIER_PERI_CLK_I2C(7, 3),
        UNIPHIER_PERI_CLK_I2C(8, 4),
-       UNIPHIER_PERI_CLK_SCSSI(11),
+       UNIPHIER_PERI_CLK_SCSSI(11, 0),
        { /* sentinel */ }
 };
 
@@ -51,7 +51,10 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
        UNIPHIER_PERI_CLK_FI2C(8, 4),
        UNIPHIER_PERI_CLK_FI2C(9, 5),
        UNIPHIER_PERI_CLK_FI2C(10, 6),
-       UNIPHIER_PERI_CLK_SCSSI(11),
-       UNIPHIER_PERI_CLK_MCSSI(12),
+       UNIPHIER_PERI_CLK_SCSSI(11, 0),
+       UNIPHIER_PERI_CLK_SCSSI(12, 1),
+       UNIPHIER_PERI_CLK_SCSSI(13, 2),
+       UNIPHIER_PERI_CLK_SCSSI(14, 3),
+       UNIPHIER_PERI_CLK_MCSSI(15),
        { /* sentinel */ }
 };
index 72ed97c..0aedd42 100644 (file)
@@ -99,8 +99,10 @@ static void u8500_clk_init(struct device_node *np)
        if (fw_version != NULL) {
                switch (fw_version->project) {
                case PRCMU_FW_PROJECT_U8500_C2:
+               case PRCMU_FW_PROJECT_U8500_MBL:
                case PRCMU_FW_PROJECT_U8520:
                case PRCMU_FW_PROJECT_U8420:
+               case PRCMU_FW_PROJECT_U8420_SYSCLK:
                        sgaclk_parent = "soc0_pll";
                        break;
                default:
index ac76685..c2618f1 100644 (file)
@@ -9,7 +9,7 @@ config COMMON_CLK_VERSATILE
                COMPILE_TEST
        select REGMAP_MMIO
        ---help---
-          Supports clocking on ARM Reference designs:
+         Supports clocking on ARM Reference designs:
          - Integrator/AP and Integrator/CP
          - RealView PB1176, EB, PB11MP and PBX
          - Versatile Express
index a11f93e..10e89f2 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Zynq UltraScale+ MPSoC clock controller
  *
- *  Copyright (C) 2016-2018 Xilinx
+ *  Copyright (C) 2016-2019 Xilinx
  *
  * Based on drivers/clk/zynq/clkc.c
  */
@@ -749,6 +749,7 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
 
 static const struct of_device_id zynqmp_clock_of_match[] = {
        {.compatible = "xlnx,zynqmp-clk"},
+       {.compatible = "xlnx,versal-clk"},
        {},
 };
 MODULE_DEVICE_TABLE(of, zynqmp_clock_of_match);
index d8f5b70..4be2cc7 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Zynq UltraScale+ MPSoC Divider support
  *
- *  Copyright (C) 2016-2018 Xilinx
+ *  Copyright (C) 2016-2019 Xilinx
  *
  * Adjustable divider clock implementation
  */
@@ -41,12 +41,30 @@ struct zynqmp_clk_divider {
        bool is_frac;
        u32 clk_id;
        u32 div_type;
+       u16 max_div;
 };
 
 static inline int zynqmp_divider_get_val(unsigned long parent_rate,
-                                        unsigned long rate)
+                                        unsigned long rate, u16 flags)
 {
-       return DIV_ROUND_CLOSEST(parent_rate, rate);
+       int up, down;
+       unsigned long up_rate, down_rate;
+
+       if (flags & CLK_DIVIDER_POWER_OF_TWO) {
+               up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+               down = DIV_ROUND_DOWN_ULL((u64)parent_rate, rate);
+
+               up = __roundup_pow_of_two(up);
+               down = __rounddown_pow_of_two(down);
+
+               up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
+               down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);
+
+               return (rate - up_rate) <= (down_rate - rate) ? up : down;
+
+       } else {
+               return DIV_ROUND_CLOSEST(parent_rate, rate);
+       }
 }
 
 /**
@@ -78,6 +96,9 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
        else
                value = div >> 16;
 
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               value = 1 << value;
+
        if (!value) {
                WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
                     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
@@ -88,6 +109,42 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
        return DIV_ROUND_UP_ULL(parent_rate, value);
 }
 
+static void zynqmp_get_divider2_val(struct clk_hw *hw,
+                                   unsigned long rate,
+                                   unsigned long parent_rate,
+                                   struct zynqmp_clk_divider *divider,
+                                   int *bestdiv)
+{
+       int div1;
+       int div2;
+       long error = LONG_MAX;
+       struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+       struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw);
+
+       if (!pdivider)
+               return;
+
+       *bestdiv = 1;
+       for (div1 = 1; div1 <= pdivider->max_div;) {
+               for (div2 = 1; div2 <= divider->max_div;) {
+                       long new_error = ((parent_rate / div1) / div2) - rate;
+
+                       if (abs(new_error) < abs(error)) {
+                               *bestdiv = div2;
+                               error = new_error;
+                       }
+                       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+                               div2 = div2 << 1;
+                       else
+                               div2++;
+               }
+               if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
+                       div1 = div1 << 1;
+               else
+                       div1++;
+       }
+}
+
 /**
  * zynqmp_clk_divider_round_rate() - Round rate of divider clock
  * @hw:                        handle between common and hardware-specific interfaces
@@ -120,10 +177,23 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
                else
                        bestdiv  = bestdiv >> 16;
 
+               if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+                       bestdiv = 1 << bestdiv;
+
                return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
        }
 
-       bestdiv = zynqmp_divider_get_val(*prate, rate);
+       bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+
+       /*
+        * In case of two divisors, compute the best divider values and
+        * return the divider2 value based on them. div1 will be automatically
+        * set to the optimum based on the required total divider value.
+        */
+       if (div_type == TYPE_DIV2 &&
+           (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+               zynqmp_get_divider2_val(hw, rate, *prate, divider, &bestdiv);
+       }
 
        if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
                bestdiv = rate % *prate ? 1 : bestdiv;
@@ -151,7 +221,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        int ret;
        const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
 
-       value = zynqmp_divider_get_val(parent_rate, rate);
+       value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
        if (div_type == TYPE_DIV1) {
                div = value & 0xFFFF;
                div |= 0xffff << 16;
@@ -160,6 +230,9 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
                div |= value << 16;
        }
 
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               div = __ffs(div);
+
        ret = eemi_ops->clock_setdivider(clk_id, div);
 
        if (ret)
@@ -175,6 +248,35 @@ static const struct clk_ops zynqmp_clk_divider_ops = {
        .set_rate = zynqmp_clk_divider_set_rate,
 };
 
+/**
+ * zynqmp_clk_get_max_divisor() - Get maximum supported divisor from firmware.
+ * @clk_id:            Id of clock
+ * @type:              Divider type
+ *
+ * Return: Maximum divisor of the clock if the query is successful,
+ *        U16_MAX if the query fails
+ */
+u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
+{
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+       struct zynqmp_pm_query_data qdata = {0};
+       u32 ret_payload[PAYLOAD_ARG_CNT];
+       int ret;
+
+       qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
+       qdata.arg1 = clk_id;
+       qdata.arg2 = type;
+       ret = eemi_ops->query_data(qdata, ret_payload);
+       /*
+        * To maintain backward compatibility, return the maximum possible
+        * value (0xFFFF) if the query for the max divisor is not successful.
+        */
+       if (ret)
+               return U16_MAX;
+
+       return ret_payload[1];
+}
+
 /**
  * zynqmp_clk_register_divider() - Register a divider clock
  * @name:              Name of this clock
@@ -215,6 +317,12 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
        div->clk_id = clk_id;
        div->div_type = nodes->type;
 
+       /*
+        * To achieve the best possible rate, the maximum divider limit is
+        * required during computation.
+        */
+       div->max_div = zynqmp_clk_get_max_divisor(clk_id, nodes->type);
+
        hw = &div->hw;
        ret = clk_hw_register(NULL, hw);
        if (ret) {
index a541397..89b5995 100644 (file)
@@ -188,10 +188,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                frac = (parent_rate * f) / FRAC_DIV;
 
                ret = eemi_ops->clock_setdivider(clk_id, m);
-               if (ret)
+               if (ret == -EUSERS)
+                       WARN(1, "More than allowed devices are using the %s, which is forbidden\n",
+                            clk_name);
+               else if (ret)
                        pr_warn_once("%s() set divider failed for %s, ret = %d\n",
                                     __func__, clk_name, ret);
-
                eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
 
                return rate + frac;
index f3ef4ed..c3b1283 100644 (file)
@@ -756,22 +756,21 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
        }
        mutex_unlock(&dma_list_mutex);
 
-       if (!IS_ERR_OR_NULL(chan))
-               goto found;
-
-       return ERR_PTR(-EPROBE_DEFER);
+       if (IS_ERR_OR_NULL(chan))
+               return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 
 found:
-       chan->slave = dev;
        chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
        if (!chan->name)
-               return ERR_PTR(-ENOMEM);
+               return chan;
+       chan->slave = dev;
 
        if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
                              DMA_SLAVE_NAME))
-               dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+               dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
        if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
-               dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+               dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
+
        return chan;
 }
 EXPORT_SYMBOL_GPL(dma_request_chan);
@@ -830,13 +829,14 @@ void dma_release_channel(struct dma_chan *chan)
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+
        if (chan->slave) {
+               sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
                sysfs_remove_link(&chan->slave->kobj, chan->name);
                kfree(chan->name);
                chan->name = NULL;
                chan->slave = NULL;
        }
-       sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
        mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -962,6 +962,9 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 
        tchan = list_first_entry_or_null(&device->channels,
                                         struct dma_chan, device_node);
+       if (!tchan)
+               return -ENODEV;
+
        if (tchan->dev) {
                idr_ref = tchan->dev->idr_ref;
        } else {
index 849c50a..6d907fe 100644 (file)
@@ -66,7 +66,7 @@ static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
 
 static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
 {
-       return wq->type == IDXD_WQT_USER ? true : false;
+       return wq->type == IDXD_WQT_USER;
 }
 
 static int idxd_config_bus_match(struct device *dev,
index e3850f0..157c959 100644 (file)
@@ -750,7 +750,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
        }
 
        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+       if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
                ret = EPROBE_DEFER;
                goto disable_reg_clk;
        }
index 899b803..9dda260 100644 (file)
@@ -27,7 +27,7 @@
 
 extern u64 efi_system_table;
 
-#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
 #include <asm/ptdump.h>
 
 static struct ptdump_info efi_ptdump_info = {
index 7e12cbd..96758b7 100644 (file)
@@ -104,6 +104,7 @@ struct ibft_control {
        u16 tgt0_off;
        u16 nic1_off;
        u16 tgt1_off;
+       u16 expansion[0];
 } __attribute__((__packed__));
 
 struct ibft_initiator {
@@ -235,7 +236,7 @@ static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
                                "found %d instead!\n", t, id, hdr->id);
                return -ENODEV;
        }
-       if (hdr->length != length) {
+       if (length && hdr->length != length) {
                printk(KERN_ERR "iBFT error: We expected the %s " \
                                "field header.length to have %d but " \
                                "found %d instead!\n", t, length, hdr->length);
@@ -749,16 +750,16 @@ static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
        control = (void *)header + sizeof(*header);
        end = (void *)control + control->hdr.length;
        eot_offset = (void *)header + header->header.length - (void *)control;
-       rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control,
-                            sizeof(*control));
+       rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 0);
 
        /* iBFT table safety checking */
        rc |= ((control->hdr.index) ? -ENODEV : 0);
+       rc |= ((control->hdr.length < sizeof(*control)) ? -ENODEV : 0);
        if (rc) {
                printk(KERN_ERR "iBFT error: Control header is invalid!\n");
                return rc;
        }
-       for (ptr = &control->initiator_off; ptr < end; ptr += sizeof(u16)) {
+       for (ptr = &control->initiator_off; ptr + sizeof(u16) <= end; ptr += sizeof(u16)) {
                offset = *(u16 *)ptr;
                if (offset && offset < header->header.length &&
                                                offset < eot_offset) {
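
The tightened loop bound above dereferences a u16 offset only when the whole two-byte field still fits below the end pointer; with the previous "ptr < end" test the final iteration could read one byte past the table. A small illustration of the same bound check, using a hypothetical 5-byte table rather than the real iBFT layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Two complete u16 offsets plus one trailing byte that must be skipped. */
        uint8_t table[5] = { 0x10, 0x00, 0x20, 0x00, 0x30 };
        uint8_t *end = table + sizeof(table);

        for (uint8_t *ptr = table; ptr + sizeof(uint16_t) <= end;
             ptr += sizeof(uint16_t)) {
                uint16_t offset;

                memcpy(&offset, ptr, sizeof(offset));   /* avoids unaligned reads */
                printf("offset 0x%04x\n", offset);
        }
        return 0;
}
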
index 75bdfaa..74d9f13 100644 (file)
@@ -48,6 +48,8 @@ static int zynqmp_pm_ret_code(u32 ret_status)
                return -EACCES;
        case XST_PM_ABORT_SUSPEND:
                return -ECANCELED;
+       case XST_PM_MULT_USER:
+               return -EUSERS;
        case XST_PM_INTERNAL:
        case XST_PM_CONFLICT:
        case XST_PM_INVALID_NODE:
index 4421be0..72b6001 100644 (file)
@@ -308,7 +308,7 @@ devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
        struct gpio_descs *descs;
 
        descs = devm_gpiod_get_array(dev, con_id, flags);
-       if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+       if (PTR_ERR(descs) == -ENOENT)
                return NULL;
 
        return descs;
index 1b3f217..c6d30f7 100644 (file)
@@ -484,24 +484,24 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
                        break;
        }
 
-       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+       if (PTR_ERR(desc) == -ENOENT) {
                /* Special handling for SPI GPIOs if used */
                desc = of_find_spi_gpio(dev, con_id, &of_flags);
        }
 
-       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+       if (PTR_ERR(desc) == -ENOENT) {
                /* This quirk looks up flags and all */
                desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
                if (!IS_ERR(desc))
                        return desc;
        }
 
-       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+       if (PTR_ERR(desc) == -ENOENT) {
                /* Special handling for regulator GPIOs if used */
                desc = of_find_regulator_gpio(dev, con_id, &of_flags);
        }
 
-       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+       if (PTR_ERR(desc) == -ENOENT)
                desc = of_find_arizona_gpio(dev, con_id, &of_flags);
 
        if (IS_ERR(desc))
index 99ac27a..7532834 100644 (file)
@@ -5039,7 +5039,7 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
        struct gpio_descs *descs;
 
        descs = gpiod_get_array(dev, con_id, flags);
-       if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+       if (PTR_ERR(descs) == -ENOENT)
                return NULL;
 
        return descs;
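
Several hunks in this series (devm_gpiod_get_array_optional(), of_find_gpio(), gpiod_get_array_optional(), and the clk lookups in the mv_xor and I2C drivers further down) drop the redundant IS_ERR() guard in front of a PTR_ERR() comparison. That is safe because ERR_PTR() encodes errnos in the top, never-mapped page of the address space, so PTR_ERR() of a valid pointer (or of NULL, which yields 0) can never equal a small negative errno. A minimal userspace re-implementation of the err.h helpers, shown only to illustrate the encoding:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified copies of the include/linux/err.h helpers. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        int real_object = 42;
        void *ok      = &real_object;           /* ordinary pointer          */
        void *missing = ERR_PTR(-ENOENT);       /* "not found" error pointer */

        /* Only the encoded error pointer matches -ENOENT, so the extra
         * IS_ERR() check added nothing to this comparison. */
        printf("ok matches -ENOENT:      %d\n", PTR_ERR(ok) == -ENOENT);
        printf("missing matches -ENOENT: %d\n", PTR_ERR(missing) == -ENOENT);
        printf("IS_ERR(ok)=%d IS_ERR(missing)=%d\n", IS_ERR(ok), IS_ERR(missing));
        return 0;
}
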
index 8f6100d..1c89454 100644 (file)
@@ -751,9 +751,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
        snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
 
        hw = clk_hw_register_mux(dev, clk_name,
-                                (const char *[]){
+                                ((const char *[]){
                                 parent, parent2, parent3, parent4
-                                }, 4, 0, pll_10nm->phy_cmn_mmio +
+                                }), 4, 0, pll_10nm->phy_cmn_mmio +
                                 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
                                 0, 2, 0, NULL);
        if (IS_ERR(hw)) {
index 8c99e01..6dffd7f 100644 (file)
@@ -554,9 +554,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
        snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
        snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
        clks[num++] = clk_register_mux(dev, clk_name,
-                       (const char *[]){
+                       ((const char *[]){
                                parent1, parent2
-                       }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+                       }), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
                        REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
 
        snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
index eebb4c0..389128b 100644 (file)
@@ -179,7 +179,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                                    pgoff_t num_prefault)
 {
        struct vm_area_struct *vma = vmf->vma;
-       struct vm_area_struct cvma = *vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
@@ -250,7 +249,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                goto out_io_unlock;
        }
 
-       cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+       prot = ttm_io_prot(bo->mem.placement, prot);
        if (!bo->mem.bus.is_iomem) {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
@@ -266,7 +265,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                }
        } else {
                /* Iomem should not be marked encrypted */
-               cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
+               prot = pgprot_decrypted(prot);
        }
 
        /*
@@ -289,11 +288,20 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                        pfn = page_to_pfn(page);
                }
 
+               /*
+                * Note that the value of @prot at this point may differ from
+                * the value of @vma->vm_page_prot in the caching- and
+                * encryption bits. This is because the exact location of the
+                * data may not be known at mmap() time and may also change
+                * at arbitrary times while the data is mmap'ed.
+                * See vmf_insert_mixed_prot() for a discussion.
+                */
                if (vma->vm_flags & VM_MIXEDMAP)
-                       ret = vmf_insert_mixed(&cvma, address,
-                                       __pfn_to_pfn_t(pfn, PFN_DEV));
+                       ret = vmf_insert_mixed_prot(vma, address,
+                                                   __pfn_to_pfn_t(pfn, PFN_DEV),
+                                                   prot);
                else
-                       ret = vmf_insert_pfn(&cvma, address, pfn);
+                       ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
                /* Never error on prefaulted PTEs */
                if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -325,7 +333,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
        if (ret)
                return ret;
 
-       prot = vm_get_page_prot(vma->vm_flags);
+       prot = vma->vm_page_prot;
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;
index 17583bf..d4c8300 100644 (file)
@@ -595,19 +595,18 @@ static int i8k_open_fs(struct inode *inode, struct file *file)
        return single_open(file, i8k_proc_show, NULL);
 }
 
-static const struct file_operations i8k_fops = {
-       .owner          = THIS_MODULE,
-       .open           = i8k_open_fs,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .unlocked_ioctl = i8k_ioctl,
+static const struct proc_ops i8k_proc_ops = {
+       .proc_open      = i8k_open_fs,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_ioctl     = i8k_ioctl,
 };
 
 static void __init i8k_init_procfs(void)
 {
        /* Register the proc entry */
-       proc_create("i8k", 0, NULL, &i8k_fops);
+       proc_create("i8k", 0, NULL, &i8k_proc_ops);
 }
 
 static void __exit i8k_exit_procfs(void)
index 14e1a53..3b05560 100644 (file)
@@ -76,7 +76,6 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
        struct device_node *node = pdev->dev.of_node;
        struct hwspinlock_device *bank;
        struct hwspinlock *hwlock;
-       struct resource *res;
        void __iomem *io_base;
        int num_locks, i, ret;
        /* Only a single hwspinlock block device is supported */
@@ -85,13 +84,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
        if (!node)
                return -ENODEV;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-
-       io_base = ioremap(res->start, resource_size(res));
-       if (!io_base)
-               return -ENOMEM;
+       io_base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(io_base))
+               return PTR_ERR(io_base);
 
        /*
         * make sure the module is enabled and clocked before reading
@@ -101,7 +96,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(&pdev->dev);
-               goto iounmap_base;
+               goto runtime_err;
        }
 
        /* Determine number of locks */
@@ -114,20 +109,21 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
         */
        ret = pm_runtime_put(&pdev->dev);
        if (ret < 0)
-               goto iounmap_base;
+               goto runtime_err;
 
        /* one of the four lsb's must be set, and nothing else */
        if (hweight_long(i & 0xf) != 1 || i > 8) {
                ret = -EINVAL;
-               goto iounmap_base;
+               goto runtime_err;
        }
 
        num_locks = i * 32; /* actual number of locks in this device */
 
-       bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
+       bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
+                           GFP_KERNEL);
        if (!bank) {
                ret = -ENOMEM;
-               goto iounmap_base;
+               goto runtime_err;
        }
 
        platform_set_drvdata(pdev, bank);
@@ -138,25 +134,21 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
        ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
                                                base_id, num_locks);
        if (ret)
-               goto reg_fail;
+               goto runtime_err;
 
        dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
                num_locks);
 
        return 0;
 
-reg_fail:
-       kfree(bank);
-iounmap_base:
+runtime_err:
        pm_runtime_disable(&pdev->dev);
-       iounmap(io_base);
        return ret;
 }
 
 static int omap_hwspinlock_remove(struct platform_device *pdev)
 {
        struct hwspinlock_device *bank = platform_get_drvdata(pdev);
-       void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
        int ret;
 
        ret = hwspin_lock_unregister(bank);
@@ -166,8 +158,6 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
        }
 
        pm_runtime_disable(&pdev->dev);
-       iounmap(io_base);
-       kfree(bank);
 
        return 0;
 }
index 6da7447..f0da544 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 
 #include "hwspinlock_internal.h"
@@ -122,35 +121,12 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev)
                                                             regmap, field);
        }
 
-       pm_runtime_enable(&pdev->dev);
-
-       ret = hwspin_lock_register(bank, &pdev->dev, &qcom_hwspinlock_ops,
-                                  0, QCOM_MUTEX_NUM_LOCKS);
-       if (ret)
-               pm_runtime_disable(&pdev->dev);
-
-       return ret;
-}
-
-static int qcom_hwspinlock_remove(struct platform_device *pdev)
-{
-       struct hwspinlock_device *bank = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = hwspin_lock_unregister(bank);
-       if (ret) {
-               dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
-               return ret;
-       }
-
-       pm_runtime_disable(&pdev->dev);
-
-       return 0;
+       return devm_hwspin_lock_register(&pdev->dev, bank, &qcom_hwspinlock_ops,
+                                        0, QCOM_MUTEX_NUM_LOCKS);
 }
 
 static struct platform_driver qcom_hwspinlock_driver = {
        .probe          = qcom_hwspinlock_probe,
-       .remove         = qcom_hwspinlock_remove,
        .driver         = {
                .name   = "qcom_hwspinlock",
                .of_match_table = qcom_hwspinlock_of_match,
index 1f625cd..823d3c4 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/io.h>
-#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/hwspinlock.h>
@@ -56,7 +55,7 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
 {
        struct sirf_hwspinlock *hwspin;
        struct hwspinlock *hwlock;
-       int idx, ret;
+       int idx;
 
        if (!pdev->dev.of_node)
                return -ENODEV;
@@ -69,9 +68,9 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* retrieve io base */
-       hwspin->io_base = of_iomap(pdev->dev.of_node, 0);
-       if (!hwspin->io_base)
-               return -ENOMEM;
+       hwspin->io_base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(hwspin->io_base))
+               return PTR_ERR(hwspin->io_base);
 
        for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) {
                hwlock = &hwspin->bank.lock[idx];
@@ -80,39 +79,9 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, hwspin);
 
-       pm_runtime_enable(&pdev->dev);
-
-       ret = hwspin_lock_register(&hwspin->bank, &pdev->dev,
-                                  &sirf_hwspinlock_ops, 0,
-                                  HW_SPINLOCK_NUMBER);
-       if (ret)
-               goto reg_failed;
-
-       return 0;
-
-reg_failed:
-       pm_runtime_disable(&pdev->dev);
-       iounmap(hwspin->io_base);
-
-       return ret;
-}
-
-static int sirf_hwspinlock_remove(struct platform_device *pdev)
-{
-       struct sirf_hwspinlock *hwspin = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = hwspin_lock_unregister(&hwspin->bank);
-       if (ret) {
-               dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
-               return ret;
-       }
-
-       pm_runtime_disable(&pdev->dev);
-
-       iounmap(hwspin->io_base);
-
-       return 0;
+       return devm_hwspin_lock_register(&pdev->dev, &hwspin->bank,
+                                        &sirf_hwspinlock_ops, 0,
+                                        HW_SPINLOCK_NUMBER);
 }
 
 static const struct of_device_id sirf_hwpinlock_ids[] = {
@@ -123,7 +92,6 @@ MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids);
 
 static struct platform_driver sirf_hwspinlock_driver = {
        .probe = sirf_hwspinlock_probe,
-       .remove = sirf_hwspinlock_remove,
        .driver = {
                .name = "atlas7_hwspinlock",
                .of_match_table = of_match_ptr(sirf_hwpinlock_ids),
index c8eacf4..3ad0ce0 100644 (file)
@@ -58,12 +58,10 @@ static int stm32_hwspinlock_probe(struct platform_device *pdev)
 {
        struct stm32_hwspinlock *hw;
        void __iomem *io_base;
-       struct resource *res;
        size_t array_size;
        int i, ret;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       io_base = devm_ioremap_resource(&pdev->dev, res);
+       io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(io_base))
                return PTR_ERR(io_base);
 
index a5a95ea..febb7c7 100644 (file)
@@ -901,14 +901,13 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 
        /* Not all platforms have clocks */
        drv_data->clk = devm_clk_get(&pd->dev, NULL);
-       if (IS_ERR(drv_data->clk) && PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
+       if (PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (!IS_ERR(drv_data->clk))
                clk_prepare_enable(drv_data->clk);
 
        drv_data->reg_clk = devm_clk_get(&pd->dev, "reg");
-       if (IS_ERR(drv_data->reg_clk) &&
-           PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
+       if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (!IS_ERR(drv_data->reg_clk))
                clk_prepare_enable(drv_data->reg_clk);
index 39762f0..8602679 100644 (file)
@@ -553,7 +553,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
                                 &i2c->pclkrate);
 
        i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
-       if (IS_ERR(i2c->pclk) && PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
+       if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (!IS_ERR_OR_NULL(i2c->pclk)) {
                dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
index e73016c..15c17f3 100644 (file)
@@ -381,13 +381,12 @@ parse_error:
        return -EINVAL;
 }
 
-static const struct file_operations ide_settings_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = ide_settings_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = ide_settings_proc_write,
+static const struct proc_ops ide_settings_proc_ops = {
+       .proc_open      = ide_settings_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = ide_settings_proc_write,
 };
 
 int ide_capacity_proc_show(struct seq_file *m, void *v)
@@ -546,7 +545,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
                if (drive->proc) {
                        ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
                        proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
-                                       drive->proc, &ide_settings_proc_fops,
+                                       drive->proc, &ide_settings_proc_ops,
                                        drive);
                }
                sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
@@ -615,7 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
        return 0;
 }
 
-DEFINE_SHOW_ATTRIBUTE(ide_drivers);
+DEFINE_PROC_SHOW_ATTRIBUTE(ide_drivers);
 
 void proc_ide_create(void)
 {
@@ -624,7 +623,7 @@ void proc_ide_create(void)
        if (!proc_ide_root)
                return;
 
-       proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops);
+       proc_create("drivers", 0, proc_ide_root, &ide_drivers_proc_ops);
 }
 
 void proc_ide_destroy(void)
index 65f85fa..68e847c 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_data/cros_ec_commands.h>
index 7dce044..576e45f 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
index 81a7f69..d3a3626 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/iio/kfifo_buf.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_data/cros_ec_commands.h>
index d85a391..7a838e2 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/iio/triggered_buffer.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
index 52f53f3..b521beb 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/iio/triggered_buffer.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_data/cros_ec_commands.h>
index ee6c323..fce43e6 100644 (file)
@@ -1216,13 +1216,12 @@ static int input_proc_devices_open(struct inode *inode, struct file *file)
        return seq_open(file, &input_devices_seq_ops);
 }
 
-static const struct file_operations input_devices_fileops = {
-       .owner          = THIS_MODULE,
-       .open           = input_proc_devices_open,
-       .poll           = input_proc_devices_poll,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops input_devices_proc_ops = {
+       .proc_open      = input_proc_devices_open,
+       .proc_poll      = input_proc_devices_poll,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1280,12 +1279,11 @@ static int input_proc_handlers_open(struct inode *inode, struct file *file)
        return seq_open(file, &input_handlers_seq_ops);
 }
 
-static const struct file_operations input_handlers_fileops = {
-       .owner          = THIS_MODULE,
-       .open           = input_proc_handlers_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops input_handlers_proc_ops = {
+       .proc_open      = input_proc_handlers_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 static int __init input_proc_init(void)
@@ -1297,12 +1295,12 @@ static int __init input_proc_init(void)
                return -ENOMEM;
 
        entry = proc_create("devices", 0, proc_bus_input_dir,
-                           &input_devices_fileops);
+                           &input_devices_proc_ops);
        if (!entry)
                goto fail1;
 
        entry = proc_create("handlers", 0, proc_bus_input_dir,
-                           &input_handlers_fileops);
+                           &input_handlers_proc_ops);
        if (!entry)
                goto fail2;
 
index 17c1cca..c8f87df 100644 (file)
@@ -191,9 +191,10 @@ static ssize_t axp20x_store_attr_shutdown(struct device *dev,
                                 axp20x_pek->info->shutdown_mask, buf, count);
 }
 
-DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup, axp20x_store_attr_startup);
-DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown,
-           axp20x_store_attr_shutdown);
+static DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup,
+                  axp20x_store_attr_startup);
+static DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown,
+                  axp20x_store_attr_shutdown);
 
 static struct attribute *axp20x_attrs[] = {
        &dev_attr_startup.attr,
@@ -279,8 +280,7 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
                return error;
        }
 
-       if (axp20x_pek->axp20x->variant == AXP288_ID)
-               enable_irq_wake(axp20x_pek->irq_dbr);
+       device_init_wakeup(&pdev->dev, true);
 
        return 0;
 }
@@ -352,6 +352,40 @@ static int axp20x_pek_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int __maybe_unused axp20x_pek_suspend(struct device *dev)
+{
+       struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+       /*
+        * As nested threaded IRQs are not automatically disabled during
+        * suspend, we must explicitly disable non-wakeup IRQs.
+        */
+       if (device_may_wakeup(dev)) {
+               enable_irq_wake(axp20x_pek->irq_dbf);
+               enable_irq_wake(axp20x_pek->irq_dbr);
+       } else {
+               disable_irq(axp20x_pek->irq_dbf);
+               disable_irq(axp20x_pek->irq_dbr);
+       }
+
+       return 0;
+}
+
+static int __maybe_unused axp20x_pek_resume(struct device *dev)
+{
+       struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev)) {
+               disable_irq_wake(axp20x_pek->irq_dbf);
+               disable_irq_wake(axp20x_pek->irq_dbr);
+       } else {
+               enable_irq(axp20x_pek->irq_dbf);
+               enable_irq(axp20x_pek->irq_dbr);
+       }
+
+       return 0;
+}
+
 static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev)
 {
        struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
@@ -371,6 +405,7 @@ static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev)
 }
 
 static const struct dev_pm_ops axp20x_pek_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(axp20x_pek_suspend, axp20x_pek_resume)
 #ifdef CONFIG_PM_SLEEP
        .resume_noirq = axp20x_pek_resume_noirq,
 #endif
index bbf9ae9..6adea8a 100644 (file)
@@ -412,6 +412,10 @@ struct f11_2d_sensor_queries {
 
 /* Defs for Ctrl0. */
 #define RMI_F11_REPORT_MODE_MASK        0x07
+#define RMI_F11_REPORT_MODE_CONTINUOUS  (0 << 0)
+#define RMI_F11_REPORT_MODE_REDUCED     (1 << 0)
+#define RMI_F11_REPORT_MODE_FS_CHANGE   (2 << 0)
+#define RMI_F11_REPORT_MODE_FP_CHANGE   (3 << 0)
 #define RMI_F11_ABS_POS_FILT            (1 << 3)
 #define RMI_F11_REL_POS_FILT            (1 << 4)
 #define RMI_F11_REL_BALLISTICS          (1 << 5)
@@ -1195,6 +1199,16 @@ static int rmi_f11_initialize(struct rmi_function *fn)
                ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
                        sensor->axis_align.delta_y_threshold;
 
+       /*
+        * If distance threshold values are set, switch to reduced reporting
+        * mode so they actually get used by the controller.
+        */
+       if (ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] ||
+           ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD]) {
+               ctrl->ctrl0_11[0] &= ~RMI_F11_REPORT_MODE_MASK;
+               ctrl->ctrl0_11[0] |= RMI_F11_REPORT_MODE_REDUCED;
+       }
+
        if (f11->sens_query.has_dribble) {
                switch (sensor->dribble) {
                case RMI_REG_STATE_OFF:
index f3e18f8..373a164 100644 (file)
@@ -165,6 +165,16 @@ config SERIO_MACEPS2
          To compile this driver as a module, choose M here: the
          module will be called maceps2.
 
+config SERIO_SGI_IOC3
+       tristate "SGI IOC3 PS/2 controller"
+       depends on SGI_MFD_IOC3
+       help
+         Say Y here if you have an SGI Onyx2, SGI Octane or IOC3 PCI card
+         and you want to attach and use a keyboard, mouse, or both.
+
+         To compile this driver as a module, choose M here: the
+         module will be called ioc3kbd.
+
 config SERIO_LIBPS2
        tristate "PS/2 driver library"
        depends on SERIO_I8042 || SERIO_I8042=n
index 67950a5..6d97bad 100644 (file)
@@ -20,6 +20,7 @@ obj-$(CONFIG_HIL_MLC)         += hp_sdc_mlc.o hil_mlc.o
 obj-$(CONFIG_SERIO_PCIPS2)     += pcips2.o
 obj-$(CONFIG_SERIO_PS2MULT)    += ps2mult.o
 obj-$(CONFIG_SERIO_MACEPS2)    += maceps2.o
+obj-$(CONFIG_SERIO_SGI_IOC3)   += ioc3kbd.o
 obj-$(CONFIG_SERIO_LIBPS2)     += libps2.o
 obj-$(CONFIG_SERIO_RAW)                += serio_raw.o
 obj-$(CONFIG_SERIO_AMS_DELTA)  += ams_delta_serio.o
index f290d5d..594ac4e 100644 (file)
@@ -51,7 +51,7 @@ struct apbps2_regs {
 
 struct apbps2_priv {
        struct serio            *io;
-       struct apbps2_regs      *regs;
+       struct apbps2_regs      __iomem *regs;
 };
 
 static int apbps2_idx;
diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c
new file mode 100644 (file)
index 0000000..d51bfe9
--- /dev/null
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 PS/2 controller driver for linux
+ *
+ * Copyright (C) 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * Based on code Copyright (C) 2005 Stanislaw Skowronek <skylark@unaligned.org>
+ *               Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/serio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/sn/ioc3.h>
+
+struct ioc3kbd_data {
+       struct ioc3_serioregs __iomem *regs;
+       struct serio *kbd, *aux;
+       bool kbd_exists, aux_exists;
+       int irq;
+};
+
+static int ioc3kbd_wait(struct ioc3_serioregs __iomem *regs, u32 mask)
+{
+       unsigned long timeout = 0;
+
+       while ((readl(&regs->km_csr) & mask) && (timeout < 250)) {
+               udelay(50);
+               timeout++;
+       }
+       return (timeout >= 250) ? -ETIMEDOUT : 0;
+}
+
+static int ioc3kbd_write(struct serio *dev, u8 val)
+{
+       struct ioc3kbd_data *d = dev->port_data;
+       int ret;
+
+       ret = ioc3kbd_wait(d->regs, KM_CSR_K_WRT_PEND);
+       if (ret)
+               return ret;
+
+       writel(val, &d->regs->k_wd);
+
+       return 0;
+}
+
+static int ioc3kbd_start(struct serio *dev)
+{
+       struct ioc3kbd_data *d = dev->port_data;
+
+       d->kbd_exists = true;
+       return 0;
+}
+
+static void ioc3kbd_stop(struct serio *dev)
+{
+       struct ioc3kbd_data *d = dev->port_data;
+
+       d->kbd_exists = false;
+}
+
+static int ioc3aux_write(struct serio *dev, u8 val)
+{
+       struct ioc3kbd_data *d = dev->port_data;
+       int ret;
+
+       ret = ioc3kbd_wait(d->regs, KM_CSR_M_WRT_PEND);
+       if (ret)
+               return ret;
+
+       writel(val, &d->regs->m_wd);
+
+       return 0;
+}
+
+static int ioc3aux_start(struct serio *dev)
+{
+       struct ioc3kbd_data *d = dev->port_data;
+
+       d->aux_exists = true;
+       return 0;
+}
+
+static void ioc3aux_stop(struct serio *dev)
+{
+       struct ioc3kbd_data *d = dev->port_data;
+
+       d->aux_exists = false;
+}
+
+static void ioc3kbd_process_data(struct serio *dev, u32 data)
+{
+       if (data & KM_RD_VALID_0)
+               serio_interrupt(dev, (data >> KM_RD_DATA_0_SHIFT) & 0xff, 0);
+       if (data & KM_RD_VALID_1)
+               serio_interrupt(dev, (data >> KM_RD_DATA_1_SHIFT) & 0xff, 0);
+       if (data & KM_RD_VALID_2)
+               serio_interrupt(dev, (data >> KM_RD_DATA_2_SHIFT) & 0xff, 0);
+}
+
+static irqreturn_t ioc3kbd_intr(int irq, void *dev_id)
+{
+       struct ioc3kbd_data *d = dev_id;
+       u32 data_k, data_m;
+
+       data_k = readl(&d->regs->k_rd);
+       if (d->kbd_exists)
+               ioc3kbd_process_data(d->kbd, data_k);
+
+       data_m = readl(&d->regs->m_rd);
+       if (d->aux_exists)
+               ioc3kbd_process_data(d->aux, data_m);
+
+       return IRQ_HANDLED;
+}
+
+static int ioc3kbd_probe(struct platform_device *pdev)
+{
+       struct ioc3_serioregs __iomem *regs;
+       struct device *dev = &pdev->dev;
+       struct ioc3kbd_data *d;
+       struct serio *sk, *sa;
+       int irq, ret;
+
+       regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return -ENXIO;
+
+       d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+       if (!d)
+               return -ENOMEM;
+
+       sk = kzalloc(sizeof(*sk), GFP_KERNEL);
+       if (!sk)
+               return -ENOMEM;
+
+       sa = kzalloc(sizeof(*sa), GFP_KERNEL);
+       if (!sa) {
+               kfree(sk);
+               return -ENOMEM;
+       }
+
+       sk->id.type = SERIO_8042;
+       sk->write = ioc3kbd_write;
+       sk->start = ioc3kbd_start;
+       sk->stop = ioc3kbd_stop;
+       snprintf(sk->name, sizeof(sk->name), "IOC3 keyboard %d", pdev->id);
+       snprintf(sk->phys, sizeof(sk->phys), "ioc3/serio%dkbd", pdev->id);
+       sk->port_data = d;
+       sk->dev.parent = dev;
+
+       sa->id.type = SERIO_8042;
+       sa->write = ioc3aux_write;
+       sa->start = ioc3aux_start;
+       sa->stop = ioc3aux_stop;
+       snprintf(sa->name, sizeof(sa->name), "IOC3 auxiliary %d", pdev->id);
+       snprintf(sa->phys, sizeof(sa->phys), "ioc3/serio%daux", pdev->id);
+       sa->port_data = d;
+       sa->dev.parent = dev;
+
+       d->regs = regs;
+       d->kbd = sk;
+       d->aux = sa;
+       d->irq = irq;
+
+       platform_set_drvdata(pdev, d);
+       serio_register_port(d->kbd);
+       serio_register_port(d->aux);
+
+       ret = request_irq(irq, ioc3kbd_intr, IRQF_SHARED, "ioc3-kbd", d);
+       if (ret) {
+               dev_err(dev, "could not request IRQ %d\n", irq);
+               serio_unregister_port(d->kbd);
+               serio_unregister_port(d->aux);
+               return ret;
+       }
+
+       /* enable ports */
+       writel(KM_CSR_K_CLAMP_3 | KM_CSR_M_CLAMP_3, &regs->km_csr);
+
+       return 0;
+}
+
+static int ioc3kbd_remove(struct platform_device *pdev)
+{
+       struct ioc3kbd_data *d = platform_get_drvdata(pdev);
+
+       free_irq(d->irq, d);
+
+       serio_unregister_port(d->kbd);
+       serio_unregister_port(d->aux);
+
+       return 0;
+}
+
+static struct platform_driver ioc3kbd_driver = {
+       .probe          = ioc3kbd_probe,
+       .remove         = ioc3kbd_remove,
+       .driver = {
+               .name = "ioc3-kbd",
+       },
+};
+module_platform_driver(ioc3kbd_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 serio driver");
+MODULE_LICENSE("GPL");
index 51ddb20..8fd7fc3 100644 (file)
@@ -333,7 +333,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
                req->xfer[1].len = 2;
 
                /* for 1uF, settle for 800 usec; no cap, 100 usec.  */
-               req->xfer[1].delay_usecs = ts->vref_delay_usecs;
+               req->xfer[1].delay.value = ts->vref_delay_usecs;
+               req->xfer[1].delay.unit = SPI_DELAY_UNIT_USECS;
                spi_message_add_tail(&req->xfer[1], &req->msg);
 
                /* Enable reference voltage */
@@ -1018,7 +1019,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
         * have had enough time to stabilize.
         */
        if (pdata->settle_delay_usecs) {
-               x->delay_usecs = pdata->settle_delay_usecs;
+               x->delay.value = pdata->settle_delay_usecs;
+               x->delay.unit = SPI_DELAY_UNIT_USECS;
 
                x++;
                x->tx_buf = &packet->read_y;
@@ -1061,7 +1063,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
 
        /* ... maybe discard first sample ... */
        if (pdata->settle_delay_usecs) {
-               x->delay_usecs = pdata->settle_delay_usecs;
+               x->delay.value = pdata->settle_delay_usecs;
+               x->delay.unit = SPI_DELAY_UNIT_USECS;
 
                x++;
                x->tx_buf = &packet->read_x;
@@ -1094,7 +1097,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
 
                /* ... maybe discard first sample ... */
                if (pdata->settle_delay_usecs) {
-                       x->delay_usecs = pdata->settle_delay_usecs;
+                       x->delay.value = pdata->settle_delay_usecs;
+                       x->delay.unit = SPI_DELAY_UNIT_USECS;
 
                        x++;
                        x->tx_buf = &packet->read_z1;
@@ -1125,7 +1129,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
 
                /* ... maybe discard first sample ... */
                if (pdata->settle_delay_usecs) {
-                       x->delay_usecs = pdata->settle_delay_usecs;
+                       x->delay.value = pdata->settle_delay_usecs;
+                       x->delay.unit = SPI_DELAY_UNIT_USECS;
 
                        x++;
                        x->tx_buf = &packet->read_z2;
index d61731c..d258772 100644 (file)
  *    http://www.glyn.com/Products/Displays
  */
 
-#include <linux/module.h>
-#include <linux/ratelimit.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
 #include <linux/debugfs.h>
-#include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
-#include <asm/unaligned.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
 #include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/unaligned.h>
 
 #define WORK_REGISTER_THRESHOLD                0x00
 #define WORK_REGISTER_REPORT_RATE      0x08
@@ -1050,6 +1051,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
 {
        const struct edt_i2c_chip_data *chip_data;
        struct edt_ft5x06_ts_data *tsdata;
+       u8 buf[2] = { 0xfc, 0x00 };
        struct input_dev *input;
        unsigned long irq_flags;
        int error;
@@ -1140,6 +1142,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
                return error;
        }
 
+       /*
+        * Dummy read access. EP0700MLP1 returns bogus data on the first
+        * register read access and ignores writes.
+        */
+       edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf);
+
        edt_ft5x06_ts_set_regs(tsdata);
        edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
        edt_ft5x06_ts_get_parameters(tsdata);
@@ -1200,7 +1208,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
                return error;
 
        edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
-       device_init_wakeup(&client->dev, 1);
 
        dev_dbg(&client->dev,
                "EDT FT5x06 initialized: IRQ %d, WAKE pin %d, Reset pin %d.\n",
@@ -1220,29 +1227,6 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
        return 0;
 }
 
-static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       if (device_may_wakeup(dev))
-               enable_irq_wake(client->irq);
-
-       return 0;
-}
-
-static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       if (device_may_wakeup(dev))
-               disable_irq_wake(client->irq);
-
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
-                        edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
-
 static const struct edt_i2c_chip_data edt_ft5x06_data = {
        .max_support_points = 5,
 };
@@ -1281,7 +1265,6 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
        .driver = {
                .name = "edt_ft5x06",
                .of_match_table = edt_ft5x06_of_match,
-               .pm = &edt_ft5x06_ts_pm_ops,
        },
        .id_table = edt_ft5x06_ts_id,
        .probe    = edt_ft5x06_ts_probe,
index d4ad24e..4911799 100644 (file)
 #define CMD_HEADER_WRITE       0x54
 #define CMD_HEADER_READ                0x53
 #define CMD_HEADER_6B_READ     0x5B
+#define CMD_HEADER_ROM_READ    0x96
 #define CMD_HEADER_RESP                0x52
 #define CMD_HEADER_6B_RESP     0x9B
+#define CMD_HEADER_ROM_RESP    0x95
 #define CMD_HEADER_HELLO       0x55
 #define CMD_HEADER_REK         0x66
 
@@ -200,6 +202,10 @@ static int elants_i2c_execute_command(struct i2c_client *client,
                expected_response = CMD_HEADER_6B_RESP;
                break;
 
+       case CMD_HEADER_ROM_READ:
+               expected_response = CMD_HEADER_ROM_RESP;
+               break;
+
        default:
                dev_err(&client->dev, "%s: invalid command %*ph\n",
                        __func__, (int)cmd_size, cmd);
@@ -556,6 +562,8 @@ static int elants_i2c_initialize(struct elants_data *ts)
 
        /* hw version is available even if device in recovery state */
        error2 = elants_i2c_query_hw_version(ts);
+       if (!error2)
+               error2 = elants_i2c_query_bc_version(ts);
        if (!error)
                error = error2;
 
@@ -563,8 +571,6 @@ static int elants_i2c_initialize(struct elants_data *ts)
                error = elants_i2c_query_fw_version(ts);
        if (!error)
                error = elants_i2c_query_test_version(ts);
-       if (!error)
-               error = elants_i2c_query_bc_version(ts);
        if (!error)
                error = elants_i2c_query_ts_info(ts);
 
@@ -613,39 +619,94 @@ static int elants_i2c_fw_write_page(struct i2c_client *client,
        return error;
 }
 
+static int elants_i2c_validate_remark_id(struct elants_data *ts,
+                                        const struct firmware *fw)
+{
+       struct i2c_client *client = ts->client;
+       int error;
+       const u8 cmd[] = { CMD_HEADER_ROM_READ, 0x80, 0x1F, 0x00, 0x00, 0x21 };
+       u8 resp[6] = { 0 };
+       u16 ts_remark_id = 0;
+       u16 fw_remark_id = 0;
+
+       /* Compare TS Remark ID and FW Remark ID */
+       error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+                                       resp, sizeof(resp));
+       if (error) {
+               dev_err(&client->dev, "failed to query Remark ID: %d\n", error);
+               return error;
+       }
+
+       ts_remark_id = get_unaligned_be16(&resp[3]);
+
+       fw_remark_id = get_unaligned_le16(&fw->data[fw->size - 4]);
+
+       if (fw_remark_id != ts_remark_id) {
+               dev_err(&client->dev,
+                       "Remark ID Mismatched: ts_remark_id=0x%04x, fw_remark_id=0x%04x.\n",
+                       ts_remark_id, fw_remark_id);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int elants_i2c_do_update_firmware(struct i2c_client *client,
                                         const struct firmware *fw,
                                         bool force)
 {
+       struct elants_data *ts = i2c_get_clientdata(client);
        const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
        const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
        const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
-       const u8 close_idle[] = {0x54, 0x2c, 0x01, 0x01};
+       const u8 close_idle[] = { 0x54, 0x2c, 0x01, 0x01 };
        u8 buf[HEADER_SIZE];
        u16 send_id;
        int page, n_fw_pages;
        int error;
+       bool check_remark_id = ts->iap_version >= 0x60;
 
        /* Recovery mode detection! */
        if (force) {
                dev_dbg(&client->dev, "Recovery mode procedure\n");
+
+               if (check_remark_id) {
+                       error = elants_i2c_validate_remark_id(ts, fw);
+                       if (error)
+                               return error;
+               }
+
                error = elants_i2c_send(client, enter_iap2, sizeof(enter_iap2));
+               if (error) {
+                       dev_err(&client->dev, "failed to enter IAP mode: %d\n",
+                               error);
+                       return error;
+               }
        } else {
                /* Start IAP Procedure */
                dev_dbg(&client->dev, "Normal IAP procedure\n");
+
                /* Close idle mode */
                error = elants_i2c_send(client, close_idle, sizeof(close_idle));
                if (error)
                        dev_err(&client->dev, "Failed close idle: %d\n", error);
                msleep(60);
+
                elants_i2c_sw_reset(client);
                msleep(20);
-               error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
-       }
 
-       if (error) {
-               dev_err(&client->dev, "failed to enter IAP mode: %d\n", error);
-               return error;
+               if (check_remark_id) {
+                       error = elants_i2c_validate_remark_id(ts, fw);
+                       if (error)
+                               return error;
+               }
+
+               error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
+               if (error) {
+                       dev_err(&client->dev, "failed to enter IAP mode: %d\n",
+                               error);
+                       return error;
+               }
        }
 
        msleep(20);
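
elants_i2c_validate_remark_id() compares a remark ID that the controller reports big-endian (bytes 3-4 of the response) against one stored little-endian four bytes from the end of the firmware image. A short stand-alone sketch of the two extractions; be16_at()/le16_at() are assumed stand-ins for get_unaligned_be16()/get_unaligned_le16(), and the sample bytes are made up.

#include <stdint.h>
#include <stdio.h>

static uint16_t be16_at(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }
static uint16_t le16_at(const uint8_t *p) { return (uint16_t)(p[1] << 8 | p[0]); }

int main(void)
{
        /* Hypothetical 6-byte response; the remark ID sits at bytes 3-4. */
        uint8_t resp[6]    = { 0x95, 0x00, 0x00, 0x12, 0x34, 0x00 };
        /* Hypothetical last four bytes of a firmware image. */
        uint8_t fw_tail[4] = { 0x34, 0x12, 0x00, 0x00 };

        uint16_t ts_remark_id = be16_at(&resp[3]);
        uint16_t fw_remark_id = le16_at(&fw_tail[0]);

        printf("match: %d\n", ts_remark_id == fw_remark_id);   /* prints 1 */
        return 0;
}
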
index 0b9d78a..d2fade9 100644 (file)
@@ -82,7 +82,7 @@ config IOMMU_DEBUGFS
 config IOMMU_DEFAULT_PASSTHROUGH
        bool "IOMMU passthrough by default"
        depends on IOMMU_API
-        help
+       help
          Enable passthrough by default, removing the need to pass in
          iommu.passthrough=on or iommu=pt through command line. If this
          is enabled, you can still disable with iommu.passthrough=off
@@ -91,8 +91,8 @@ config IOMMU_DEFAULT_PASSTHROUGH
          If unsure, say N here.
 
 config OF_IOMMU
-       def_bool y
-       depends on OF && IOMMU_API
+       def_bool y
+       depends on OF && IOMMU_API
 
 # IOMMU-agnostic DMA-mapping layer
 config IOMMU_DMA
@@ -214,6 +214,7 @@ config INTEL_IOMMU_SVM
        select PCI_PASID
        select PCI_PRI
        select MMU_NOTIFIER
+       select IOASID
        help
          Shared Virtual Memory (SVM) provides a facility for devices
          to access DMA resources through process address space by
@@ -248,6 +249,18 @@ config INTEL_IOMMU_FLOPPY_WA
          workaround will setup a 1:1 mapping for the first
          16MiB to make floppy (an ISA device) work.
 
+config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+       bool "Enable Intel IOMMU scalable mode by default"
+       depends on INTEL_IOMMU
+       help
+         Selecting this option will enable scalable mode by default if the
+         hardware advertises the capability. Scalable mode is defined in
+         VT-d 3.0. The scalable mode capability can be checked by reading
+         /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
+         is not selected, scalable mode support can also be enabled by
+         passing intel_iommu=sm_on to the kernel. If unsure, use the
+         default value.
+
 config IRQ_REMAP
        bool "Support for Interrupt Remapping"
        depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
@@ -356,7 +369,7 @@ config SPAPR_TCE_IOMMU
 
 # ARM IOMMU support
 config ARM_SMMU
-       bool "ARM Ltd. System MMU (SMMU) Support"
+       tristate "ARM Ltd. System MMU (SMMU) Support"
        depends on (ARM64 || ARM) && MMU
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
@@ -368,6 +381,18 @@ config ARM_SMMU
          Say Y here if your SoC includes an IOMMU device implementing
          the ARM SMMU architecture.
 
+config ARM_SMMU_LEGACY_DT_BINDINGS
+       bool "Support the legacy \"mmu-masters\" devicetree bindings"
+       depends on ARM_SMMU=y && OF
+       help
+         Support for the badly designed and deprecated "mmu-masters"
+         devicetree bindings. This allows some DMA masters to attach
+         to the SMMU but does not provide any support via the DMA API.
+         If you're lucky, you might be able to get VFIO up and running.
+
+         If you say Y here then you'll make me very sad. Instead, say N
+         and move your firmware to the utopian future that was 2016.
+
 config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
        bool "Default to disabling bypass on ARM SMMU v1 and v2"
        depends on ARM_SMMU
@@ -394,7 +419,7 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
          config.
 
 config ARM_SMMU_V3
-       bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+       tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
        depends on ARM64
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
index 97814cc..2104fb8 100644 (file)
@@ -14,7 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
+arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
index 7a6c056..aac132b 100644 (file)
@@ -2294,7 +2294,6 @@ int __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
        swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-       iommu_detected = 1;
 
        if (amd_iommu_unmap_flush)
                pr_info("IO/TLB flush on unmap enabled\n");
@@ -2638,15 +2637,6 @@ static void amd_iommu_get_resv_regions(struct device *dev,
        list_add_tail(&region->list, head);
 }
 
-static void amd_iommu_put_resv_regions(struct device *dev,
-                                    struct list_head *head)
-{
-       struct iommu_resv_region *entry, *next;
-
-       list_for_each_entry_safe(entry, next, head, list)
-               kfree(entry);
-}
-
 static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
                                         struct device *dev)
 {
@@ -2685,7 +2675,7 @@ const struct iommu_ops amd_iommu_ops = {
        .device_group = amd_iommu_device_group,
        .domain_get_attr = amd_iommu_domain_get_attr,
        .get_resv_regions = amd_iommu_get_resv_regions,
-       .put_resv_regions = amd_iommu_put_resv_regions,
+       .put_resv_regions = generic_iommu_put_resv_regions,
        .is_attach_deferred = amd_iommu_is_attach_deferred,
        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
        .flush_iotlb_all = amd_iommu_flush_iotlb_all,
index 823cc4e..2759a8d 100644 (file)
@@ -71,6 +71,8 @@
 #define IVHD_FLAG_ISOC_EN_MASK          0x08
 
 #define IVMD_FLAG_EXCL_RANGE            0x08
+#define IVMD_FLAG_IW                    0x04
+#define IVMD_FLAG_IR                    0x02
 #define IVMD_FLAG_UNITY_MAP             0x01
 
 #define ACPI_DEVFLAG_INITPASS           0x01
@@ -147,7 +149,7 @@ bool amd_iommu_dump;
 bool amd_iommu_irq_remap __read_mostly;
 
 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
 
 static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;
@@ -714,7 +716,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
-       iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+       iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
 }
 
@@ -1116,21 +1118,17 @@ static int __init add_early_maps(void)
  */
 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 {
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;
 
-       if (iommu) {
-               /*
-                * We only can configure exclusion ranges per IOMMU, not
-                * per device. But we can enable the exclusion range per
-                * device. This is done here
-                */
-               set_dev_entry_bit(devid, DEV_ENTRY_EX);
-               iommu->exclusion_start = m->range_start;
-               iommu->exclusion_length = m->range_length;
-       }
+       /*
+        * Treat per-device exclusion ranges as r/w unity-mapped regions
+        * since some buggy BIOSes overwrite the IOMMU's exclusion range
+        * (the exclusion_start and exclusion_length members). This
+        * happens when multiple exclusion ranges (IVMD entries) are
+        * defined in the ACPI table.
+        */
+       m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
 }
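
With the IVMD flag values defined earlier in this file, the rewritten flags work out as follows (a simple arithmetic check, not new behaviour):

    m->flags = IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP
             = 0x04 | 0x02 | 0x01
             = 0x07;    /* read/write unity-mapped region */
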
 
 /*
@@ -1523,8 +1521,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-               if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
-                       amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
                break;
        case 0x11:
        case 0x40:
@@ -1534,8 +1530,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-               if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
-                       amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+               /*
+                * Note: since iommu_update_intcapxt() accesses the MSI
+                * capability block registers (MSI address lo/hi and data)
+                * through IOMMU MMIO, both EFR[XtSup] and
+                * EFR[MsiCapMmioSup] must be set for x2APIC support.
+                */
+               if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
+                   (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
+                       amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
                break;
        default:
                return -EINVAL;
@@ -1727,7 +1730,6 @@ static const struct attribute_group *amd_iommu_groups[] = {
 static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
        int cap_ptr = iommu->cap_ptr;
-       u32 range, misc, low, high;
        int ret;
 
        iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
@@ -1740,19 +1742,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
-       pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
-                             &range);
-       pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
-                             &misc);
 
        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;
 
        /* read extended feature bits */
-       low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
-       high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
-       iommu->features = ((u64)high << 32) | low;
+       iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
 
        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
@@ -1996,8 +1991,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
        struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
 
        /**
-        * IntCapXT requires XTSup=1, which can be inferred
-        * amd_iommu_xt_mode.
+        * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
+        * which can be inferred from amd_iommu_xt_mode.
         */
        if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
                return 0;
@@ -2044,7 +2039,7 @@ enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
        if (iommu->ppr_log != NULL)
-               iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+               iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
 
        iommu_ga_log_enable(iommu);
 
index f52f59d..f8d01d6 100644 (file)
 #define CONTROL_COHERENT_EN     0x0aULL
 #define CONTROL_ISOC_EN         0x0bULL
 #define CONTROL_CMDBUF_EN       0x0cULL
-#define CONTROL_PPFLOG_EN       0x0dULL
-#define CONTROL_PPFINT_EN       0x0eULL
+#define CONTROL_PPRLOG_EN       0x0dULL
+#define CONTROL_PPRINT_EN       0x0eULL
 #define CONTROL_PPR_EN          0x0fULL
 #define CONTROL_GT_EN           0x10ULL
 #define CONTROL_GA_EN           0x11ULL
 #define IOMMU_CAP_EFR     27
 
 /* IOMMU Feature Reporting Field (for IVHD type 10h) */
-#define IOMMU_FEAT_XTSUP_SHIFT 0
 #define IOMMU_FEAT_GASUP_SHIFT 6
 
 /* IOMMU Extended Feature Register (EFR) */
 #define IOMMU_EFR_XTSUP_SHIFT  2
 #define IOMMU_EFR_GASUP_SHIFT  7
+#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT  46
 
 #define MAX_DOMAIN_ID 65536
 
@@ -463,7 +463,6 @@ struct amd_irte_ops;
  * independent of their use.
  */
 struct protection_domain {
-       struct list_head list;  /* for list of all protection domains */
        struct list_head dev_list; /* List of all devices in this domain */
        struct iommu_domain domain; /* generic domain handle used by
                                       iommu core code */
index b2fe72a..74d97a8 100644 (file)
@@ -119,7 +119,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
         * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
         */
        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
-       major = FIELD_GET(ID7_MAJOR, reg);
+       major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
        if (major >= 2)
                reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
index effe72e..aa3ac2a 100644 (file)
@@ -21,8 +21,7 @@
 #include <linux/io-pgtable.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 
 #define STRTAB_STE_0_S1FMT             GENMASK_ULL(5, 4)
 #define STRTAB_STE_0_S1FMT_LINEAR      0
+#define STRTAB_STE_0_S1FMT_64K_L2      2
 #define STRTAB_STE_0_S1CTXPTR_MASK     GENMASK_ULL(51, 6)
 #define STRTAB_STE_0_S1CDMAX           GENMASK_ULL(63, 59)
 
+#define STRTAB_STE_1_S1DSS             GENMASK_ULL(1, 0)
+#define STRTAB_STE_1_S1DSS_TERMINATE   0x0
+#define STRTAB_STE_1_S1DSS_BYPASS      0x1
+#define STRTAB_STE_1_S1DSS_SSID0       0x2
+
 #define STRTAB_STE_1_S1C_CACHE_NC      0UL
 #define STRTAB_STE_1_S1C_CACHE_WBRA    1UL
 #define STRTAB_STE_1_S1C_CACHE_WT      2UL
 
 #define STRTAB_STE_2_S2VMID            GENMASK_ULL(15, 0)
 #define STRTAB_STE_2_VTCR              GENMASK_ULL(50, 32)
+#define STRTAB_STE_2_VTCR_S2T0SZ       GENMASK_ULL(5, 0)
+#define STRTAB_STE_2_VTCR_S2SL0                GENMASK_ULL(7, 6)
+#define STRTAB_STE_2_VTCR_S2IR0                GENMASK_ULL(9, 8)
+#define STRTAB_STE_2_VTCR_S2OR0                GENMASK_ULL(11, 10)
+#define STRTAB_STE_2_VTCR_S2SH0                GENMASK_ULL(13, 12)
+#define STRTAB_STE_2_VTCR_S2TG         GENMASK_ULL(15, 14)
+#define STRTAB_STE_2_VTCR_S2PS         GENMASK_ULL(18, 16)
 #define STRTAB_STE_2_S2AA64            (1UL << 51)
 #define STRTAB_STE_2_S2ENDI            (1UL << 52)
 #define STRTAB_STE_2_S2PTW             (1UL << 54)
 
 #define STRTAB_STE_3_S2TTB_MASK                GENMASK_ULL(51, 4)
 
-/* Context descriptor (stage-1 only) */
+/*
+ * Context descriptors.
+ *
+ * Linear: used when at most 1024 SSIDs are supported.
+ * 2lvl: at most 1024 L1 entries, each pointing to a lazily
+ *       allocated leaf table of 1024 entries.
+ */
+#define CTXDESC_SPLIT                  10
+#define CTXDESC_L2_ENTRIES             (1 << CTXDESC_SPLIT)
+
+#define CTXDESC_L1_DESC_DWORDS         1
+#define CTXDESC_L1_DESC_V              (1UL << 0)
+#define CTXDESC_L1_DESC_L2PTR_MASK     GENMASK_ULL(51, 12)
+
 #define CTXDESC_CD_DWORDS              8
 #define CTXDESC_CD_0_TCR_T0SZ          GENMASK_ULL(5, 0)
-#define ARM64_TCR_T0SZ                 GENMASK_ULL(5, 0)
 #define CTXDESC_CD_0_TCR_TG0           GENMASK_ULL(7, 6)
-#define ARM64_TCR_TG0                  GENMASK_ULL(15, 14)
 #define CTXDESC_CD_0_TCR_IRGN0         GENMASK_ULL(9, 8)
-#define ARM64_TCR_IRGN0                        GENMASK_ULL(9, 8)
 #define CTXDESC_CD_0_TCR_ORGN0         GENMASK_ULL(11, 10)
-#define ARM64_TCR_ORGN0                        GENMASK_ULL(11, 10)
 #define CTXDESC_CD_0_TCR_SH0           GENMASK_ULL(13, 12)
-#define ARM64_TCR_SH0                  GENMASK_ULL(13, 12)
 #define CTXDESC_CD_0_TCR_EPD0          (1ULL << 14)
-#define ARM64_TCR_EPD0                 (1ULL << 7)
 #define CTXDESC_CD_0_TCR_EPD1          (1ULL << 30)
-#define ARM64_TCR_EPD1                 (1ULL << 23)
 
 #define CTXDESC_CD_0_ENDI              (1UL << 15)
 #define CTXDESC_CD_0_V                 (1UL << 31)
 
 #define CTXDESC_CD_0_TCR_IPS           GENMASK_ULL(34, 32)
-#define ARM64_TCR_IPS                  GENMASK_ULL(34, 32)
 #define CTXDESC_CD_0_TCR_TBI0          (1ULL << 38)
-#define ARM64_TCR_TBI0                 (1ULL << 37)
 
 #define CTXDESC_CD_0_AA64              (1UL << 41)
 #define CTXDESC_CD_0_S                 (1UL << 44)
 
 #define CTXDESC_CD_1_TTB0_MASK         GENMASK_ULL(51, 4)
 
-/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
-#define ARM_SMMU_TCR2CD(tcr, fld)      FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
-                                       FIELD_GET(ARM64_TCR_##fld, tcr))
+/*
+ * When the SMMU only supports linear context descriptor tables, pick a
+ * reasonable size limit (64kB).
+ */
+#define CTXDESC_LINEAR_CDMAX           ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
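
Plugging in the constants above makes the limit explicit:

    CTXDESC_CD_DWORDS << 3  = 64 bytes per context descriptor
    SZ_64K / 64             = 1024 descriptors in a 64kB linear table
    CTXDESC_LINEAR_CDMAX    = ilog2(1024) = 10    /* matches CTXDESC_SPLIT */
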
 
 /* Command queue */
 #define CMDQ_ENT_SZ_SHIFT              4
 #define CMDQ_PREFETCH_1_SIZE           GENMASK_ULL(4, 0)
 #define CMDQ_PREFETCH_1_ADDR_MASK      GENMASK_ULL(63, 12)
 
+#define CMDQ_CFGI_0_SSID               GENMASK_ULL(31, 12)
 #define CMDQ_CFGI_0_SID                        GENMASK_ULL(63, 32)
 #define CMDQ_CFGI_1_LEAF               (1UL << 0)
 #define CMDQ_CFGI_1_RANGE              GENMASK_ULL(4, 0)
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                        0x100000
 
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param_named here.
- */
 static bool disable_bypass = 1;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -440,8 +455,11 @@ struct arm_smmu_cmdq_ent {
 
                #define CMDQ_OP_CFGI_STE        0x3
                #define CMDQ_OP_CFGI_ALL        0x4
+               #define CMDQ_OP_CFGI_CD         0x5
+               #define CMDQ_OP_CFGI_CD_ALL     0x6
                struct {
                        u32                     sid;
+                       u32                     ssid;
                        union {
                                bool            leaf;
                                u8              span;
@@ -547,16 +565,30 @@ struct arm_smmu_strtab_l1_desc {
        dma_addr_t                      l2ptr_dma;
 };
 
+struct arm_smmu_ctx_desc {
+       u16                             asid;
+       u64                             ttbr;
+       u64                             tcr;
+       u64                             mair;
+};
+
+struct arm_smmu_l1_ctx_desc {
+       __le64                          *l2ptr;
+       dma_addr_t                      l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc_cfg {
+       __le64                          *cdtab;
+       dma_addr_t                      cdtab_dma;
+       struct arm_smmu_l1_ctx_desc     *l1_desc;
+       unsigned int                    num_l1_ents;
+};
+
 struct arm_smmu_s1_cfg {
-       __le64                          *cdptr;
-       dma_addr_t                      cdptr_dma;
-
-       struct arm_smmu_ctx_desc {
-               u16     asid;
-               u64     ttbr;
-               u64     tcr;
-               u64     mair;
-       }                               cd;
+       struct arm_smmu_ctx_desc_cfg    cdcfg;
+       struct arm_smmu_ctx_desc        cd;
+       u8                              s1fmt;
+       u8                              s1cdmax;
 };
 
 struct arm_smmu_s2_cfg {
@@ -638,6 +670,7 @@ struct arm_smmu_master {
        u32                             *sids;
        unsigned int                    num_sids;
        bool                            ats_enabled;
+       unsigned int                    ssid_bits;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -847,15 +880,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
                cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
                cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
                break;
+       case CMDQ_OP_CFGI_CD:
+               cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
+               /* Fallthrough */
        case CMDQ_OP_CFGI_STE:
                cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
                cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
                break;
+       case CMDQ_OP_CFGI_CD_ALL:
+               cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
+               break;
        case CMDQ_OP_CFGI_ALL:
                /* Cover the entire SID range */
                cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
                break;
        case CMDQ_OP_TLBI_NH_VA:
+               cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
                cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
                cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
                cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
@@ -1443,50 +1483,238 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 }
 
 /* Context descriptor manipulation functions */
-static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
+static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+                            int ssid, bool leaf)
 {
-       u64 val = 0;
+       size_t i;
+       unsigned long flags;
+       struct arm_smmu_master *master;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_cmdq_ent cmd = {
+               .opcode = CMDQ_OP_CFGI_CD,
+               .cfgi   = {
+                       .ssid   = ssid,
+                       .leaf   = leaf,
+               },
+       };
+
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+               for (i = 0; i < master->num_sids; i++) {
+                       cmd.cfgi.sid = master->sids[i];
+                       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+               }
+       }
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+       arm_smmu_cmdq_issue_sync(smmu);
+}
 
-       /* Repack the TCR. Just care about TTBR0 for now */
-       val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
-       val |= ARM_SMMU_TCR2CD(tcr, TG0);
-       val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
-       val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
-       val |= ARM_SMMU_TCR2CD(tcr, SH0);
-       val |= ARM_SMMU_TCR2CD(tcr, EPD0);
-       val |= ARM_SMMU_TCR2CD(tcr, EPD1);
-       val |= ARM_SMMU_TCR2CD(tcr, IPS);
+static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
+                                       struct arm_smmu_l1_ctx_desc *l1_desc)
+{
+       size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
 
-       return val;
+       l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
+                                            &l1_desc->l2ptr_dma, GFP_KERNEL);
+       if (!l1_desc->l2ptr) {
+               dev_warn(smmu->dev,
+                        "failed to allocate context descriptor table\n");
+               return -ENOMEM;
+       }
+       return 0;
 }
 
-static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
-                                   struct arm_smmu_s1_cfg *cfg)
+static void arm_smmu_write_cd_l1_desc(__le64 *dst,
+                                     struct arm_smmu_l1_ctx_desc *l1_desc)
 {
-       u64 val;
+       u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
+                 CTXDESC_L1_DESC_V;
+
+       WRITE_ONCE(*dst, cpu_to_le64(val));
+}
+
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
+                                  u32 ssid)
+{
+       __le64 *l1ptr;
+       unsigned int idx;
+       struct arm_smmu_l1_ctx_desc *l1_desc;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
 
+       if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+               return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+
+       idx = ssid >> CTXDESC_SPLIT;
+       l1_desc = &cdcfg->l1_desc[idx];
+       if (!l1_desc->l2ptr) {
+               if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+                       return NULL;
+
+               l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+               arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+               /* An invalid L1CD can be cached */
+               arm_smmu_sync_cd(smmu_domain, ssid, false);
+       }
+       idx = ssid & (CTXDESC_L2_ENTRIES - 1);
+       return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+}
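
As a quick illustration of the two-level lookup above, using the constants from this patch (the SSID value itself is made up for the example):

    u32 ssid = 0x2345;                      /* hypothetical SSID         */
    idx = ssid >> CTXDESC_SPLIT;            /* 0x2345 >> 10    = 8       */
    idx = ssid & (CTXDESC_L2_ENTRIES - 1);  /* 0x2345 & 0x3ff  = 0x345   */

so this SSID's context descriptor lives in slot 0x345 of the lazily allocated leaf table referenced by L1 entry 8.
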
+
+static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
+                                  int ssid, struct arm_smmu_ctx_desc *cd)
+{
        /*
-        * We don't need to issue any invalidation here, as we'll invalidate
-        * the STE when installing the new entry anyway.
+        * This function handles the following cases:
+        *
+        * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+        * (2) Install a secondary CD, for SID+SSID traffic.
+        * (3) Update ASID of a CD. Atomically write the first 64 bits of the
+        *     CD, then invalidate the old entry and mappings.
+        * (4) Remove a secondary CD.
         */
-       val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
+       u64 val;
+       bool cd_live;
+       __le64 *cdptr;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+       if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+               return -E2BIG;
+
+       cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+       if (!cdptr)
+               return -ENOMEM;
+
+       val = le64_to_cpu(cdptr[0]);
+       cd_live = !!(val & CTXDESC_CD_0_V);
+
+       if (!cd) { /* (4) */
+               val = 0;
+       } else if (cd_live) { /* (3) */
+               val &= ~CTXDESC_CD_0_ASID;
+               val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
+               /*
+                * Until CD+TLB invalidation, both ASIDs may be used for tagging
+                * this substream's traffic
+                */
+       } else { /* (1) and (2) */
+               cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+               cdptr[2] = 0;
+               cdptr[3] = cpu_to_le64(cd->mair);
+
+               /*
+                * STE is live, and the SMMU might read dwords of this CD in any
+                * order. Ensure that it observes valid values before reading
+                * V=1.
+                */
+               arm_smmu_sync_cd(smmu_domain, ssid, true);
+
+               val = cd->tcr |
 #ifdef __BIG_ENDIAN
-             CTXDESC_CD_0_ENDI |
+                       CTXDESC_CD_0_ENDI |
 #endif
-             CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
-             CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
-             CTXDESC_CD_0_V;
+                       CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+                       CTXDESC_CD_0_AA64 |
+                       FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
+                       CTXDESC_CD_0_V;
 
-       /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
-       if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
-               val |= CTXDESC_CD_0_S;
+               /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+               if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+                       val |= CTXDESC_CD_0_S;
+       }
 
-       cfg->cdptr[0] = cpu_to_le64(val);
+       /*
+        * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
+        * "Configuration structures and configuration invalidation completion"
+        *
+        *   The size of single-copy atomic reads made by the SMMU is
+        *   IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
+        *   field within an aligned 64-bit span of a structure can be altered
+        *   without first making the structure invalid.
+        */
+       WRITE_ONCE(cdptr[0], cpu_to_le64(val));
+       arm_smmu_sync_cd(smmu_domain, ssid, true);
+       return 0;
+}
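
The ordering that the install path above enforces is worth spelling out. A condensed sketch using the same names as the hunk, covering cases (1) and (2) only, with error handling omitted:

    /* 1. Fill every dword except dword 0 while V is still 0.      */
    cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
    cdptr[2] = 0;
    cdptr[3] = cpu_to_le64(cd->mair);
    /* 2. Invalidate any cached copy of the still-invalid CD.      */
    arm_smmu_sync_cd(smmu_domain, ssid, true);
    /* 3. Publish dword 0, including V=1, as one 64-bit store.     */
    WRITE_ONCE(cdptr[0], cpu_to_le64(val));
    /* 4. Invalidate again so the SMMU observes the valid CD.      */
    arm_smmu_sync_cd(smmu_domain, ssid, true);
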
+
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+       int ret;
+       size_t l1size;
+       size_t max_contexts;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+       struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
+
+       max_contexts = 1 << cfg->s1cdmax;
+
+       if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
+           max_contexts <= CTXDESC_L2_ENTRIES) {
+               cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+               cdcfg->num_l1_ents = max_contexts;
 
-       val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
-       cfg->cdptr[1] = cpu_to_le64(val);
+               l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
+       } else {
+               cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+               cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+                                                 CTXDESC_L2_ENTRIES);
+
+               cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
+                                             sizeof(*cdcfg->l1_desc),
+                                             GFP_KERNEL);
+               if (!cdcfg->l1_desc)
+                       return -ENOMEM;
+
+               l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+       }
+
+       cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+                                          GFP_KERNEL);
+       if (!cdcfg->cdtab) {
+               dev_warn(smmu->dev, "failed to allocate context descriptor\n");
+               ret = -ENOMEM;
+               goto err_free_l1;
+       }
+
+       return 0;
 
-       cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
+err_free_l1:
+       if (cdcfg->l1_desc) {
+               devm_kfree(smmu->dev, cdcfg->l1_desc);
+               cdcfg->l1_desc = NULL;
+       }
+       return ret;
+}
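
For scale, consider a hypothetical master advertising 16 SSID bits (so s1cdmax = 16) on an SMMU with ARM_SMMU_FEAT_2_LVL_CDTAB; it takes the two-level branch above:

    max_contexts = 1 << 16                            = 65536
    num_l1_ents  = DIV_ROUND_UP(65536, 1024)          = 64
    l1size       = 64 * (CTXDESC_L1_DESC_DWORDS << 3) = 512 bytes

Each 64kB leaf table (1024 CDs of 64 bytes) is then only allocated on demand by arm_smmu_alloc_cd_leaf_table() when a CD in its range is first written.
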
+
+static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+       int i;
+       size_t size, l1size;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+       if (cdcfg->l1_desc) {
+               size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+
+               for (i = 0; i < cdcfg->num_l1_ents; i++) {
+                       if (!cdcfg->l1_desc[i].l2ptr)
+                               continue;
+
+                       dmam_free_coherent(smmu->dev, size,
+                                          cdcfg->l1_desc[i].l2ptr,
+                                          cdcfg->l1_desc[i].l2ptr_dma);
+               }
+               devm_kfree(smmu->dev, cdcfg->l1_desc);
+               cdcfg->l1_desc = NULL;
+
+               l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+       } else {
+               l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+       }
+
+       dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
+       cdcfg->cdtab_dma = 0;
+       cdcfg->cdtab = NULL;
 }
 
 /* Stream table manipulation functions */
@@ -1608,6 +1836,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
        if (s1_cfg) {
                BUG_ON(ste_live);
                dst[1] = cpu_to_le64(
+                        FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
                         FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
                         FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
                         FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
@@ -1617,8 +1846,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                   !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
                        dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
-               val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
-                       FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
+               val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+                       FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+                       FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
+                       FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
        }
 
        if (s2_cfg) {
@@ -1642,7 +1873,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                                                 STRTAB_STE_1_EATS_TRANS));
 
        arm_smmu_sync_ste_for_sid(smmu, sid);
-       dst[0] = cpu_to_le64(val);
+       /* See comment in arm_smmu_write_ctx_desc() */
+       WRITE_ONCE(dst[0], cpu_to_le64(val));
        arm_smmu_sync_ste_for_sid(smmu, sid);
 
        /* It's likely that we'll want to use the new STE soon */
@@ -1675,7 +1907,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 
        desc->span = STRTAB_SPLIT + 1;
        desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
-                                         GFP_KERNEL | __GFP_ZERO);
+                                         GFP_KERNEL);
        if (!desc->l2ptr) {
                dev_err(smmu->dev,
                        "failed to allocate l2 stream table for SID %u\n",
@@ -2131,12 +2363,8 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
 
-               if (cfg->cdptr) {
-                       dmam_free_coherent(smmu_domain->smmu->dev,
-                                          CTXDESC_CD_DWORDS << 3,
-                                          cfg->cdptr,
-                                          cfg->cdptr_dma);
-
+               if (cfg->cdcfg.cdtab) {
+                       arm_smmu_free_cd_tables(smmu_domain);
                        arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
                }
        } else {
@@ -2149,55 +2377,82 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 }
 
 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+                                      struct arm_smmu_master *master,
                                       struct io_pgtable_cfg *pgtbl_cfg)
 {
        int ret;
        int asid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+       typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
        asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
        if (asid < 0)
                return asid;
 
-       cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
-                                        &cfg->cdptr_dma,
-                                        GFP_KERNEL | __GFP_ZERO);
-       if (!cfg->cdptr) {
-               dev_warn(smmu->dev, "failed to allocate context descriptor\n");
-               ret = -ENOMEM;
+       cfg->s1cdmax = master->ssid_bits;
+
+       ret = arm_smmu_alloc_cd_tables(smmu_domain);
+       if (ret)
                goto out_free_asid;
-       }
 
        cfg->cd.asid    = (u16)asid;
-       cfg->cd.ttbr    = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-       cfg->cd.tcr     = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+       cfg->cd.ttbr    = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+       cfg->cd.tcr     = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+                         CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
        cfg->cd.mair    = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+
+       /*
+        * Note that this will end up calling arm_smmu_sync_cd() before
+        * the master has been added to the devices list for this domain.
+        * This isn't an issue because the STE hasn't been installed yet.
+        */
+       ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
+       if (ret)
+               goto out_free_cd_tables;
+
        return 0;
 
+out_free_cd_tables:
+       arm_smmu_free_cd_tables(smmu_domain);
 out_free_asid:
        arm_smmu_bitmap_free(smmu->asid_map, asid);
        return ret;
 }
 
 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+                                      struct arm_smmu_master *master,
                                       struct io_pgtable_cfg *pgtbl_cfg)
 {
        int vmid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+       typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
 
        vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
        if (vmid < 0)
                return vmid;
 
+       vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
        cfg->vmid       = (u16)vmid;
        cfg->vttbr      = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
-       cfg->vtcr       = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+       cfg->vtcr       = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
        return 0;
 }
 
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct iommu_domain *domain,
+                                   struct arm_smmu_master *master)
 {
        int ret;
        unsigned long ias, oas;
@@ -2205,6 +2460,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        struct io_pgtable_cfg pgtbl_cfg;
        struct io_pgtable_ops *pgtbl_ops;
        int (*finalise_stage_fn)(struct arm_smmu_domain *,
+                                struct arm_smmu_master *,
                                 struct io_pgtable_cfg *);
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -2259,7 +2515,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
        domain->geometry.force_aperture = true;
 
-       ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+       ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
        if (ret < 0) {
                free_io_pgtable_ops(pgtbl_ops);
                return ret;
@@ -2412,7 +2668,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
        if (!smmu_domain->smmu) {
                smmu_domain->smmu = smmu;
-               ret = arm_smmu_domain_finalise(domain);
+               ret = arm_smmu_domain_finalise(domain, master);
                if (ret) {
                        smmu_domain->smmu = NULL;
                        goto out_unlock;
@@ -2424,6 +2680,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                        dev_name(smmu->dev));
                ret = -ENXIO;
                goto out_unlock;
+       } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+                  master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
+               dev_err(dev,
+                       "cannot attach to incompatible domain (%u SSID bits != %u)\n",
+                       smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
+               ret = -EINVAL;
+               goto out_unlock;
        }
 
        master->domain = smmu_domain;
@@ -2431,9 +2694,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
                master->ats_enabled = arm_smmu_ats_supported(master);
 
-       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
-               arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
-
        arm_smmu_install_ste_for_dev(master);
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -2534,51 +2794,66 @@ static int arm_smmu_add_device(struct device *dev)
 
        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return -ENODEV;
-       /*
-        * We _can_ actually withstand dodgy bus code re-calling add_device()
-        * without an intervening remove_device()/of_xlate() sequence, but
-        * we're not going to do so quietly...
-        */
-       if (WARN_ON_ONCE(fwspec->iommu_priv)) {
-               master = fwspec->iommu_priv;
-               smmu = master->smmu;
-       } else {
-               smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
-               if (!smmu)
-                       return -ENODEV;
-               master = kzalloc(sizeof(*master), GFP_KERNEL);
-               if (!master)
-                       return -ENOMEM;
 
-               master->dev = dev;
-               master->smmu = smmu;
-               master->sids = fwspec->ids;
-               master->num_sids = fwspec->num_ids;
-               fwspec->iommu_priv = master;
-       }
+       if (WARN_ON_ONCE(fwspec->iommu_priv))
+               return -EBUSY;
+
+       smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
+       if (!smmu)
+               return -ENODEV;
+
+       master = kzalloc(sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return -ENOMEM;
+
+       master->dev = dev;
+       master->smmu = smmu;
+       master->sids = fwspec->ids;
+       master->num_sids = fwspec->num_ids;
+       fwspec->iommu_priv = master;
 
        /* Check the SIDs are in range of the SMMU and our stream table */
        for (i = 0; i < master->num_sids; i++) {
                u32 sid = master->sids[i];
 
-               if (!arm_smmu_sid_in_range(smmu, sid))
-                       return -ERANGE;
+               if (!arm_smmu_sid_in_range(smmu, sid)) {
+                       ret = -ERANGE;
+                       goto err_free_master;
+               }
 
                /* Ensure l2 strtab is initialised */
                if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
                        ret = arm_smmu_init_l2_strtab(smmu, sid);
                        if (ret)
-                               return ret;
+                               goto err_free_master;
                }
        }
 
+       master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+
+       if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
+               master->ssid_bits = min_t(u8, master->ssid_bits,
+                                         CTXDESC_LINEAR_CDMAX);
+
+       ret = iommu_device_link(&smmu->iommu, dev);
+       if (ret)
+               goto err_free_master;
+
        group = iommu_group_get_for_dev(dev);
-       if (!IS_ERR(group)) {
-               iommu_group_put(group);
-               iommu_device_link(&smmu->iommu, dev);
+       if (IS_ERR(group)) {
+               ret = PTR_ERR(group);
+               goto err_unlink;
        }
 
-       return PTR_ERR_OR_ZERO(group);
+       iommu_group_put(group);
+       return 0;
+
+err_unlink:
+       iommu_device_unlink(&smmu->iommu, dev);
+err_free_master:
+       kfree(master);
+       fwspec->iommu_priv = NULL;
+       return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
@@ -2710,15 +2985,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        iommu_dma_get_resv_regions(dev, head);
 }
 
-static void arm_smmu_put_resv_regions(struct device *dev,
-                                     struct list_head *head)
-{
-       struct iommu_resv_region *entry, *next;
-
-       list_for_each_entry_safe(entry, next, head, list)
-               kfree(entry);
-}
-
 static struct iommu_ops arm_smmu_ops = {
        .capable                = arm_smmu_capable,
        .domain_alloc           = arm_smmu_domain_alloc,
@@ -2736,7 +3002,7 @@ static struct iommu_ops arm_smmu_ops = {
        .domain_set_attr        = arm_smmu_domain_set_attr,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
-       .put_resv_regions       = arm_smmu_put_resv_regions,
+       .put_resv_regions       = generic_iommu_put_resv_regions,
        .pgsize_bitmap          = -1UL, /* Restricted during device attach */
 };
 
@@ -2883,7 +3149,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 
        l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
        strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
-                                    GFP_KERNEL | __GFP_ZERO);
+                                    GFP_KERNEL);
        if (!strtab) {
                dev_err(smmu->dev,
                        "failed to allocate l1 stream table (%u bytes)\n",
@@ -2910,7 +3176,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
 
        size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
        strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
-                                    GFP_KERNEL | __GFP_ZERO);
+                                    GFP_KERNEL);
        if (!strtab) {
                dev_err(smmu->dev,
                        "failed to allocate linear stream table (%u bytes)\n",
@@ -3570,6 +3836,43 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
                return SZ_128K;
 }
 
+static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
+{
+       int err;
+
+#ifdef CONFIG_PCI
+       if (pci_bus_type.iommu_ops != ops) {
+               err = bus_set_iommu(&pci_bus_type, ops);
+               if (err)
+                       return err;
+       }
+#endif
+#ifdef CONFIG_ARM_AMBA
+       if (amba_bustype.iommu_ops != ops) {
+               err = bus_set_iommu(&amba_bustype, ops);
+               if (err)
+                       goto err_reset_pci_ops;
+       }
+#endif
+       if (platform_bus_type.iommu_ops != ops) {
+               err = bus_set_iommu(&platform_bus_type, ops);
+               if (err)
+                       goto err_reset_amba_ops;
+       }
+
+       return 0;
+
+err_reset_amba_ops:
+#ifdef CONFIG_ARM_AMBA
+       bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+       bus_set_iommu(&pci_bus_type, NULL);
+#endif
+       return err;
+}
+
 static int arm_smmu_device_probe(struct platform_device *pdev)
 {
        int irq, ret;
@@ -3599,7 +3902,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 
        /* Base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
+       if (resource_size(res) < arm_smmu_resource_size(smmu)) {
                dev_err(dev, "MMIO region too small (%pr)\n", res);
                return -EINVAL;
        }
@@ -3660,48 +3963,45 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                return ret;
        }
 
-#ifdef CONFIG_PCI
-       if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
-               pci_request_acs();
-               ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
-               if (ret)
-                       return ret;
-       }
-#endif
-#ifdef CONFIG_ARM_AMBA
-       if (amba_bustype.iommu_ops != &arm_smmu_ops) {
-               ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-               if (ret)
-                       return ret;
-       }
-#endif
-       if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
-               ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-               if (ret)
-                       return ret;
-       }
-       return 0;
+       return arm_smmu_set_bus_ops(&arm_smmu_ops);
 }
 
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
+       arm_smmu_set_bus_ops(NULL);
+       iommu_device_unregister(&smmu->iommu);
+       iommu_device_sysfs_remove(&smmu->iommu);
        arm_smmu_device_disable(smmu);
+
+       return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+       arm_smmu_device_remove(pdev);
 }
 
 static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,smmu-v3", },
        { },
 };
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
 static struct platform_driver arm_smmu_driver = {
        .driver = {
-               .name           = "arm-smmu-v3",
-               .of_match_table = of_match_ptr(arm_smmu_of_match),
-               .suppress_bind_attrs = true,
+               .name                   = "arm-smmu-v3",
+               .of_match_table         = arm_smmu_of_match,
+               .suppress_bind_attrs    = true,
        },
        .probe  = arm_smmu_device_probe,
+       .remove = arm_smmu_device_remove,
        .shutdown = arm_smmu_device_shutdown,
 };
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu-v3");
+MODULE_LICENSE("GPL v2");
index 4f1a350..16c4b87 100644 (file)
@@ -27,8 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #define MSI_IOVA_LENGTH                        0x100000
 
 static int force_stage;
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param() here.
- */
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
        "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
@@ -131,6 +126,12 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
        return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
+static int arm_smmu_bus_init(struct iommu_ops *ops);
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
        if (dev_is_pci(dev)) {
@@ -166,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
        return err == -ENOENT ? 0 : err;
 }
 
-static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
-
 static int arm_smmu_register_legacy_master(struct device *dev,
                                           struct arm_smmu_device **smmu)
 {
@@ -220,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev,
        return err;
 }
 
+/*
+ * With the legacy DT binding in play, we have no guarantees about probe
+ * order; however, we are also not using default domains, so we can delay
+ * setting the bus ops until every possible SMMU is ready, ensuring that
+ * no add_device() calls are missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+       if (using_legacy_binding)
+               return arm_smmu_bus_init(&arm_smmu_ops);
+       return 0;
+}
+device_initcall_sync(arm_smmu_legacy_bus_init);
+#else
+static int arm_smmu_register_legacy_master(struct device *dev,
+                                          struct arm_smmu_device **smmu)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
+
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
 {
        int idx;
@@ -252,7 +271,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        reg = arm_smmu_readl(smmu, page, status);
-                       if (!(reg & sTLBGSTATUS_GSACTIVE))
+                       if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
@@ -459,7 +478,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        int idx = smmu_domain->cfg.cbndx;
 
        fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
-       if (!(fsr & FSR_FAULT))
+       if (!(fsr & ARM_SMMU_FSR_FAULT))
                return IRQ_NONE;
 
        fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
@@ -491,7 +510,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 
        if (__ratelimit(&rs)) {
                if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
-                   (gfsr & sGFSR_USF))
+                   (gfsr & ARM_SMMU_sGFSR_USF))
                        dev_err(smmu->dev,
                                "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
                                (u16)gfsynr1);
@@ -521,26 +540,28 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
                } else {
-                       cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
-                       cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-                       cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
+                       cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
+                       cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
                        if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-                               cb->tcr[1] |= TCR2_AS;
+                               cb->tcr[1] |= ARM_SMMU_TCR2_AS;
+                       else
+                               cb->tcr[0] |= ARM_SMMU_TCR_EAE;
                }
        } else {
-               cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+               cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
        }
 
        /* TTBRs */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
-                       cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
-                       cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+                       cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
+                       cb->ttbr[1] = 0;
                } else {
-                       cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-                       cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
-                       cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-                       cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
+                       cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+                       cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+                                                 cfg->asid);
+                       cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+                                                cfg->asid);
                }
        } else {
                cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -576,31 +597,33 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
        /* CBA2R */
        if (smmu->version > ARM_SMMU_V1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-                       reg = CBA2R_VA64;
+                       reg = ARM_SMMU_CBA2R_VA64;
                else
                        reg = 0;
                /* 16-bit VMIDs live in CBA2R */
                if (smmu->features & ARM_SMMU_FEAT_VMID16)
-                       reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
+                       reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
 
                arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
        }
 
        /* CBAR */
-       reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
+       reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
        if (smmu->version < ARM_SMMU_V2)
-               reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
+               reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
 
        /*
         * Use the weakest shareability/memory types, so they are
         * overridden by the ttbcr/pte.
         */
        if (stage1) {
-               reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
-                       FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
+               reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
+                                 ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
+                      FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
+                                 ARM_SMMU_CBAR_S1_MEMATTR_WB);
        } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
                /* 8-bit VMIDs live in CBAR */
-               reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
+               reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
        }
        arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
 
@@ -632,11 +655,12 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
        }
 
        /* SCTLR */
-       reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
+       reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
+             ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
        if (stage1)
-               reg |= SCTLR_S1_ASIDPNE;
+               reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
-               reg |= SCTLR_E;
+               reg |= ARM_SMMU_SCTLR_E;
 
        arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
 }
@@ -818,7 +842,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (ret < 0) {
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
-               cfg->irptndx = INVALID_IRPTNDX;
+               cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
        }
 
        mutex_unlock(&smmu_domain->init_mutex);
@@ -856,7 +880,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        smmu->cbs[cfg->cbndx].cfg = NULL;
        arm_smmu_write_context_bank(smmu, cfg->cbndx);
 
-       if (cfg->irptndx != INVALID_IRPTNDX) {
+       if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
                irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
                devm_free_irq(smmu->dev, irq, domain);
        }
@@ -912,23 +936,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 {
        struct arm_smmu_smr *smr = smmu->smrs + idx;
-       u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);
+       u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
+                 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
 
        if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
-               reg |= SMR_VALID;
+               reg |= ARM_SMMU_SMR_VALID;
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
 }
 
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
        struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-       u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
-                 FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
-                 FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);
+       u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+                 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+                 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
 
        if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
            smmu->smrs[idx].valid)
-               reg |= S2CR_EXIDVALID;
+               reg |= ARM_SMMU_S2CR_EXIDVALID;
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
 }
 
@@ -946,24 +971,37 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
 {
        u32 smr;
+       int i;
 
        if (!smmu->smrs)
                return;
-
+       /*
+        * If we've had to accommodate firmware memory regions, we may
+        * have live SMRs by now; tread carefully...
+        *
+        * Somewhat perversely, not having a free SMR for this test implies we
+        * can get away without it anyway, as we'll only be able to 'allocate'
+        * these SMRs for the ID/mask values we're already trusting to be OK.
+        */
+       for (i = 0; i < smmu->num_mapping_groups; i++)
+               if (!smmu->smrs[i].valid)
+                       goto smr_ok;
+       return;
+smr_ok:
        /*
         * SMR.ID bits may not be preserved if the corresponding MASK
         * bits are set, so check each one separately. We can reject
         * masters later if they try to claim IDs outside these masks.
         */
-       smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
-       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
-       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
-       smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
+       smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+       smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
 
-       smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
-       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
-       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
-       smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
+       smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+       smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
 }
 
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
@@ -1032,8 +1070,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
        mutex_lock(&smmu->stream_map_mutex);
        /* Figure out a viable stream map entry allocation */
        for_each_cfg_sme(fwspec, i, idx) {
-               u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
-               u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+               u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+               u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
 
                if (idx != INVALID_SMENDX) {
                        ret = -EEXIST;
@@ -1277,7 +1315,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
                arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
 
        reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
-       if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
+       if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
+                                     5, 50)) {
                spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
                dev_err(dev,
                        "iova to phys timed out on %pad. Falling back to software table walk.\n",
@@ -1287,7 +1326,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 
        phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
-       if (phys & CB_PAR_F) {
+       if (phys & ARM_SMMU_CB_PAR_F) {
                dev_err(dev, "translation fault!\n");
                dev_err(dev, "PAR = 0x%llx\n", phys);
                return 0;
@@ -1368,8 +1407,8 @@ static int arm_smmu_add_device(struct device *dev)
 
        ret = -EINVAL;
        for (i = 0; i < fwspec->num_ids; i++) {
-               u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
-               u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+               u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+               u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
 
                if (sid & ~smmu->streamid_mask) {
                        dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
@@ -1550,12 +1589,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
        u32 mask, fwid = 0;
 
        if (args->args_count > 0)
-               fwid |= FIELD_PREP(SMR_ID, args->args[0]);
+               fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
 
        if (args->args_count > 1)
-               fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
+               fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
        else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
-               fwid |= FIELD_PREP(SMR_MASK, mask);
+               fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
 
        return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
@@ -1576,15 +1615,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        iommu_dma_get_resv_regions(dev, head);
 }
 
-static void arm_smmu_put_resv_regions(struct device *dev,
-                                     struct list_head *head)
-{
-       struct iommu_resv_region *entry, *next;
-
-       list_for_each_entry_safe(entry, next, head, list)
-               kfree(entry);
-}
-
 static struct iommu_ops arm_smmu_ops = {
        .capable                = arm_smmu_capable,
        .domain_alloc           = arm_smmu_domain_alloc,
@@ -1602,7 +1632,7 @@ static struct iommu_ops arm_smmu_ops = {
        .domain_set_attr        = arm_smmu_domain_set_attr,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
-       .put_resv_regions       = arm_smmu_put_resv_regions,
+       .put_resv_regions       = generic_iommu_put_resv_regions,
        .pgsize_bitmap          = -1UL, /* Restricted during device attach */
 };
 
@@ -1625,7 +1655,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        /* Make sure all context banks are disabled and clear CB_FSR  */
        for (i = 0; i < smmu->num_context_banks; ++i) {
                arm_smmu_write_context_bank(smmu, i);
-               arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
+               arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
        }
 
        /* Invalidate the TLB, just in case */
@@ -1635,29 +1665,30 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
 
        /* Enable fault reporting */
-       reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+       reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
+               ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
 
        /* Disable TLB broadcasting. */
-       reg |= (sCR0_VMIDPNE | sCR0_PTM);
+       reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
 
        /* Enable client access, handling unmatched streams as appropriate */
-       reg &= ~sCR0_CLIENTPD;
+       reg &= ~ARM_SMMU_sCR0_CLIENTPD;
        if (disable_bypass)
-               reg |= sCR0_USFCFG;
+               reg |= ARM_SMMU_sCR0_USFCFG;
        else
-               reg &= ~sCR0_USFCFG;
+               reg &= ~ARM_SMMU_sCR0_USFCFG;
 
        /* Disable forced broadcasting */
-       reg &= ~sCR0_FB;
+       reg &= ~ARM_SMMU_sCR0_FB;
 
        /* Don't upgrade barriers */
-       reg &= ~(sCR0_BSU);
+       reg &= ~(ARM_SMMU_sCR0_BSU);
 
        if (smmu->features & ARM_SMMU_FEAT_VMID16)
-               reg |= sCR0_VMID16EN;
+               reg |= ARM_SMMU_sCR0_VMID16EN;
 
        if (smmu->features & ARM_SMMU_FEAT_EXIDS)
-               reg |= sCR0_EXIDENABLE;
+               reg |= ARM_SMMU_sCR0_EXIDENABLE;
 
        if (smmu->impl && smmu->impl->reset)
                smmu->impl->reset(smmu);
@@ -1702,21 +1733,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
        /* Restrict available stages based on module parameter */
        if (force_stage == 1)
-               id &= ~(ID0_S2TS | ID0_NTS);
+               id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
        else if (force_stage == 2)
-               id &= ~(ID0_S1TS | ID0_NTS);
+               id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
 
-       if (id & ID0_S1TS) {
+       if (id & ARM_SMMU_ID0_S1TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
                dev_notice(smmu->dev, "\tstage 1 translation\n");
        }
 
-       if (id & ID0_S2TS) {
+       if (id & ARM_SMMU_ID0_S2TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
                dev_notice(smmu->dev, "\tstage 2 translation\n");
        }
 
-       if (id & ID0_NTS) {
+       if (id & ARM_SMMU_ID0_NTS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
                dev_notice(smmu->dev, "\tnested translation\n");
        }
@@ -1727,8 +1758,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                return -ENODEV;
        }
 
-       if ((id & ID0_S1TS) &&
-               ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
+       if ((id & ARM_SMMU_ID0_S1TS) &&
+           ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
@@ -1739,7 +1770,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * Fortunately, this also opens up a workaround for systems where the
         * ID register value has ended up configured incorrectly.
         */
-       cttw_reg = !!(id & ID0_CTTW);
+       cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
        if (cttw_fw || cttw_reg)
                dev_notice(smmu->dev, "\t%scoherent table walk\n",
                           cttw_fw ? "" : "non-");
@@ -1748,16 +1779,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                           "\t(IDR0.CTTW overridden by FW configuration)\n");
 
        /* Max. number of entries we have for stream matching/indexing */
-       if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+       if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
                smmu->features |= ARM_SMMU_FEAT_EXIDS;
                size = 1 << 16;
        } else {
-               size = 1 << FIELD_GET(ID0_NUMSIDB, id);
+               size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
        }
        smmu->streamid_mask = size - 1;
-       if (id & ID0_SMS) {
+       if (id & ARM_SMMU_ID0_SMS) {
                smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
-               size = FIELD_GET(ID0_NUMSMRG, id);
+               size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
                if (size == 0) {
                        dev_err(smmu->dev,
                                "stream-matching supported, but no SMRs present!\n");
@@ -1785,18 +1816,19 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        mutex_init(&smmu->stream_map_mutex);
        spin_lock_init(&smmu->global_sync_lock);
 
-       if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
+       if (smmu->version < ARM_SMMU_V2 ||
+           !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
                smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
-               if (!(id & ID0_PTFS_NO_AARCH32S))
+               if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
        }
 
        /* ID1 */
        id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
-       smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
+       smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
 
        /* Check for size mismatch of SMMU address space from mapped region */
-       size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
+       size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
        if (smmu->numpage != 2 * size << smmu->pgshift)
                dev_warn(smmu->dev,
                        "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
@@ -1804,8 +1836,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
        smmu->numpage = size;
 
-       smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
-       smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
+       smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
+       smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
        if (smmu->num_s2_context_banks > smmu->num_context_banks) {
                dev_err(smmu->dev, "impossible number of S2 context banks!\n");
                return -ENODEV;
@@ -1819,14 +1851,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
        /* ID2 */
        id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
-       size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
+       size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
        smmu->ipa_size = size;
 
        /* The output mask is also applied for bypass */
-       size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
+       size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
        smmu->pa_size = size;
 
-       if (id & ID2_VMID16)
+       if (id & ARM_SMMU_ID2_VMID16)
                smmu->features |= ARM_SMMU_FEAT_VMID16;
 
        /*
@@ -1843,13 +1875,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                if (smmu->version == ARM_SMMU_V1_64K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        } else {
-               size = FIELD_GET(ID2_UBS, id);
+               size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
                smmu->va_size = arm_smmu_id_size_to_bits(size);
-               if (id & ID2_PTFS_4K)
+               if (id & ARM_SMMU_ID2_PTFS_4K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
-               if (id & ID2_PTFS_16K)
+               if (id & ARM_SMMU_ID2_PTFS_16K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
-               if (id & ID2_PTFS_64K)
+               if (id & ARM_SMMU_ID2_PTFS_64K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        }
 
@@ -1911,6 +1943,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
        { },
 };
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
 #ifdef CONFIG_ACPI
 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
@@ -1997,8 +2030,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
 
        legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
        if (legacy_binding && !using_generic_binding) {
-               if (!using_legacy_binding)
-                       pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+               if (!using_legacy_binding) {
+                       pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
+                                 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
+               }
                using_legacy_binding = true;
        } else if (!legacy_binding && !using_legacy_binding) {
                using_generic_binding = true;
@@ -2013,25 +2048,50 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
        return 0;
 }
 
-static void arm_smmu_bus_init(void)
+static int arm_smmu_bus_init(struct iommu_ops *ops)
 {
+       int err;
+
        /* Oh, for a proper bus abstraction */
-       if (!iommu_present(&platform_bus_type))
-               bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+       if (!iommu_present(&platform_bus_type)) {
+               err = bus_set_iommu(&platform_bus_type, ops);
+               if (err)
+                       return err;
+       }
 #ifdef CONFIG_ARM_AMBA
-       if (!iommu_present(&amba_bustype))
-               bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+       if (!iommu_present(&amba_bustype)) {
+               err = bus_set_iommu(&amba_bustype, ops);
+               if (err)
+                       goto err_reset_platform_ops;
+       }
 #endif
 #ifdef CONFIG_PCI
        if (!iommu_present(&pci_bus_type)) {
-               pci_request_acs();
-               bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+               err = bus_set_iommu(&pci_bus_type, ops);
+               if (err)
+                       goto err_reset_amba_ops;
        }
 #endif
 #ifdef CONFIG_FSL_MC_BUS
-       if (!iommu_present(&fsl_mc_bus_type))
-               bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+       if (!iommu_present(&fsl_mc_bus_type)) {
+               err = bus_set_iommu(&fsl_mc_bus_type, ops);
+               if (err)
+                       goto err_reset_pci_ops;
+       }
 #endif
+       return 0;
+
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+       bus_set_iommu(&pci_bus_type, NULL);
+#endif
+err_reset_amba_ops: __maybe_unused;
+#ifdef CONFIG_ARM_AMBA
+       bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_platform_ops: __maybe_unused;
+       bus_set_iommu(&platform_bus_type, NULL);
+       return err;
 }
 
 static int arm_smmu_device_probe(struct platform_device *pdev)
@@ -2177,38 +2237,28 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
         * ready to handle default domain setup as soon as any SMMU exists.
         */
        if (!using_legacy_binding)
-               arm_smmu_bus_init();
+               return arm_smmu_bus_init(&arm_smmu_ops);
 
        return 0;
 }
 
-/*
- * With the legacy DT binding in play, though, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
-       if (using_legacy_binding)
-               arm_smmu_bus_init();
-       return 0;
-}
-device_initcall_sync(arm_smmu_legacy_bus_init);
-
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
        if (!smmu)
-               return;
+               return -ENODEV;
 
        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_err(&pdev->dev, "removing device with active domains!\n");
 
+       arm_smmu_bus_init(NULL);
+       iommu_device_unregister(&smmu->iommu);
+       iommu_device_sysfs_remove(&smmu->iommu);
+
        arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
-       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
        arm_smmu_rpm_put(smmu);
 
        if (pm_runtime_enabled(smmu->dev))
@@ -2217,6 +2267,12 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
                clk_bulk_disable(smmu->num_clks, smmu->clks);
 
        clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+       return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+       arm_smmu_device_remove(pdev);
 }
 
 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
@@ -2267,11 +2323,17 @@ static const struct dev_pm_ops arm_smmu_pm_ops = {
 static struct platform_driver arm_smmu_driver = {
        .driver = {
                .name                   = "arm-smmu",
-               .of_match_table         = of_match_ptr(arm_smmu_of_match),
+               .of_match_table         = arm_smmu_of_match,
                .pm                     = &arm_smmu_pm_ops,
-               .suppress_bind_attrs    = true,
+               .suppress_bind_attrs    = true,
        },
        .probe  = arm_smmu_device_probe,
+       .remove = arm_smmu_device_remove,
        .shutdown = arm_smmu_device_shutdown,
 };
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu");
+MODULE_LICENSE("GPL v2");
index 62b9f0c..8d1cd54 100644 (file)
@@ -11,6 +11,7 @@
 #define _ARM_SMMU_H
 
 #include <linux/atomic.h>
+#include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/clk.h>
 #include <linux/device.h>
 
 /* Configuration registers */
 #define ARM_SMMU_GR0_sCR0              0x0
-#define sCR0_VMID16EN                  BIT(31)
-#define sCR0_BSU                       GENMASK(15, 14)
-#define sCR0_FB                                BIT(13)
-#define sCR0_PTM                       BIT(12)
-#define sCR0_VMIDPNE                   BIT(11)
-#define sCR0_USFCFG                    BIT(10)
-#define sCR0_GCFGFIE                   BIT(5)
-#define sCR0_GCFGFRE                   BIT(4)
-#define sCR0_EXIDENABLE                        BIT(3)
-#define sCR0_GFIE                      BIT(2)
-#define sCR0_GFRE                      BIT(1)
-#define sCR0_CLIENTPD                  BIT(0)
+#define ARM_SMMU_sCR0_VMID16EN         BIT(31)
+#define ARM_SMMU_sCR0_BSU              GENMASK(15, 14)
+#define ARM_SMMU_sCR0_FB               BIT(13)
+#define ARM_SMMU_sCR0_PTM              BIT(12)
+#define ARM_SMMU_sCR0_VMIDPNE          BIT(11)
+#define ARM_SMMU_sCR0_USFCFG           BIT(10)
+#define ARM_SMMU_sCR0_GCFGFIE          BIT(5)
+#define ARM_SMMU_sCR0_GCFGFRE          BIT(4)
+#define ARM_SMMU_sCR0_EXIDENABLE       BIT(3)
+#define ARM_SMMU_sCR0_GFIE             BIT(2)
+#define ARM_SMMU_sCR0_GFRE             BIT(1)
+#define ARM_SMMU_sCR0_CLIENTPD         BIT(0)
 
 /* Auxiliary Configuration register */
 #define ARM_SMMU_GR0_sACR              0x10
 
 /* Identification registers */
 #define ARM_SMMU_GR0_ID0               0x20
-#define ID0_S1TS                       BIT(30)
-#define ID0_S2TS                       BIT(29)
-#define ID0_NTS                                BIT(28)
-#define ID0_SMS                                BIT(27)
-#define ID0_ATOSNS                     BIT(26)
-#define ID0_PTFS_NO_AARCH32            BIT(25)
-#define ID0_PTFS_NO_AARCH32S           BIT(24)
-#define ID0_NUMIRPT                    GENMASK(23, 16)
-#define ID0_CTTW                       BIT(14)
-#define ID0_NUMSIDB                    GENMASK(12, 9)
-#define ID0_EXIDS                      BIT(8)
-#define ID0_NUMSMRG                    GENMASK(7, 0)
+#define ARM_SMMU_ID0_S1TS              BIT(30)
+#define ARM_SMMU_ID0_S2TS              BIT(29)
+#define ARM_SMMU_ID0_NTS               BIT(28)
+#define ARM_SMMU_ID0_SMS               BIT(27)
+#define ARM_SMMU_ID0_ATOSNS            BIT(26)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32   BIT(25)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32S  BIT(24)
+#define ARM_SMMU_ID0_NUMIRPT           GENMASK(23, 16)
+#define ARM_SMMU_ID0_CTTW              BIT(14)
+#define ARM_SMMU_ID0_NUMSIDB           GENMASK(12, 9)
+#define ARM_SMMU_ID0_EXIDS             BIT(8)
+#define ARM_SMMU_ID0_NUMSMRG           GENMASK(7, 0)
 
 #define ARM_SMMU_GR0_ID1               0x24
-#define ID1_PAGESIZE                   BIT(31)
-#define ID1_NUMPAGENDXB                        GENMASK(30, 28)
-#define ID1_NUMS2CB                    GENMASK(23, 16)
-#define ID1_NUMCB                      GENMASK(7, 0)
+#define ARM_SMMU_ID1_PAGESIZE          BIT(31)
+#define ARM_SMMU_ID1_NUMPAGENDXB       GENMASK(30, 28)
+#define ARM_SMMU_ID1_NUMS2CB           GENMASK(23, 16)
+#define ARM_SMMU_ID1_NUMCB             GENMASK(7, 0)
 
 #define ARM_SMMU_GR0_ID2               0x28
-#define ID2_VMID16                     BIT(15)
-#define ID2_PTFS_64K                   BIT(14)
-#define ID2_PTFS_16K                   BIT(13)
-#define ID2_PTFS_4K                    BIT(12)
-#define ID2_UBS                                GENMASK(11, 8)
-#define ID2_OAS                                GENMASK(7, 4)
-#define ID2_IAS                                GENMASK(3, 0)
+#define ARM_SMMU_ID2_VMID16            BIT(15)
+#define ARM_SMMU_ID2_PTFS_64K          BIT(14)
+#define ARM_SMMU_ID2_PTFS_16K          BIT(13)
+#define ARM_SMMU_ID2_PTFS_4K           BIT(12)
+#define ARM_SMMU_ID2_UBS               GENMASK(11, 8)
+#define ARM_SMMU_ID2_OAS               GENMASK(7, 4)
+#define ARM_SMMU_ID2_IAS               GENMASK(3, 0)
 
 #define ARM_SMMU_GR0_ID3               0x2c
 #define ARM_SMMU_GR0_ID4               0x30
 #define ARM_SMMU_GR0_ID6               0x38
 
 #define ARM_SMMU_GR0_ID7               0x3c
-#define ID7_MAJOR                      GENMASK(7, 4)
-#define ID7_MINOR                      GENMASK(3, 0)
+#define ARM_SMMU_ID7_MAJOR             GENMASK(7, 4)
+#define ARM_SMMU_ID7_MINOR             GENMASK(3, 0)
 
 #define ARM_SMMU_GR0_sGFSR             0x48
-#define sGFSR_USF                      BIT(1)
+#define ARM_SMMU_sGFSR_USF             BIT(1)
 
 #define ARM_SMMU_GR0_sGFSYNR0          0x50
 #define ARM_SMMU_GR0_sGFSYNR1          0x54
 #define ARM_SMMU_GR0_sTLBGSYNC         0x70
 
 #define ARM_SMMU_GR0_sTLBGSTATUS       0x74
-#define sTLBGSTATUS_GSACTIVE           BIT(0)
+#define ARM_SMMU_sTLBGSTATUS_GSACTIVE  BIT(0)
 
 /* Stream mapping registers */
 #define ARM_SMMU_GR0_SMR(n)            (0x800 + ((n) << 2))
-#define SMR_VALID                      BIT(31)
-#define SMR_MASK                       GENMASK(31, 16)
-#define SMR_ID                         GENMASK(15, 0)
+#define ARM_SMMU_SMR_VALID             BIT(31)
+#define ARM_SMMU_SMR_MASK              GENMASK(31, 16)
+#define ARM_SMMU_SMR_ID                        GENMASK(15, 0)
 
 #define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
-#define S2CR_PRIVCFG                   GENMASK(25, 24)
+#define ARM_SMMU_S2CR_PRIVCFG          GENMASK(25, 24)
 enum arm_smmu_s2cr_privcfg {
        S2CR_PRIVCFG_DEFAULT,
        S2CR_PRIVCFG_DIPAN,
        S2CR_PRIVCFG_UNPRIV,
        S2CR_PRIVCFG_PRIV,
 };
-#define S2CR_TYPE                      GENMASK(17, 16)
+#define ARM_SMMU_S2CR_TYPE             GENMASK(17, 16)
 enum arm_smmu_s2cr_type {
        S2CR_TYPE_TRANS,
        S2CR_TYPE_BYPASS,
        S2CR_TYPE_FAULT,
 };
-#define S2CR_EXIDVALID                 BIT(10)
-#define S2CR_CBNDX                     GENMASK(7, 0)
+#define ARM_SMMU_S2CR_EXIDVALID                BIT(10)
+#define ARM_SMMU_S2CR_CBNDX            GENMASK(7, 0)
 
 /* Context bank attribute registers */
 #define ARM_SMMU_GR1_CBAR(n)           (0x0 + ((n) << 2))
-#define CBAR_IRPTNDX                   GENMASK(31, 24)
-#define CBAR_TYPE                      GENMASK(17, 16)
+#define ARM_SMMU_CBAR_IRPTNDX          GENMASK(31, 24)
+#define ARM_SMMU_CBAR_TYPE             GENMASK(17, 16)
 enum arm_smmu_cbar_type {
        CBAR_TYPE_S2_TRANS,
        CBAR_TYPE_S1_TRANS_S2_BYPASS,
        CBAR_TYPE_S1_TRANS_S2_FAULT,
        CBAR_TYPE_S1_TRANS_S2_TRANS,
 };
-#define CBAR_S1_MEMATTR                        GENMASK(15, 12)
-#define CBAR_S1_MEMATTR_WB             0xf
-#define CBAR_S1_BPSHCFG                        GENMASK(9, 8)
-#define CBAR_S1_BPSHCFG_NSH            3
-#define CBAR_VMID                      GENMASK(7, 0)
+#define ARM_SMMU_CBAR_S1_MEMATTR       GENMASK(15, 12)
+#define ARM_SMMU_CBAR_S1_MEMATTR_WB    0xf
+#define ARM_SMMU_CBAR_S1_BPSHCFG       GENMASK(9, 8)
+#define ARM_SMMU_CBAR_S1_BPSHCFG_NSH   3
+#define ARM_SMMU_CBAR_VMID             GENMASK(7, 0)
 
 #define ARM_SMMU_GR1_CBFRSYNRA(n)      (0x400 + ((n) << 2))
 
 #define ARM_SMMU_GR1_CBA2R(n)          (0x800 + ((n) << 2))
-#define CBA2R_VMID16                   GENMASK(31, 16)
-#define CBA2R_VA64                     BIT(0)
+#define ARM_SMMU_CBA2R_VMID16          GENMASK(31, 16)
+#define ARM_SMMU_CBA2R_VA64            BIT(0)
 
 #define ARM_SMMU_CB_SCTLR              0x0
-#define SCTLR_S1_ASIDPNE               BIT(12)
-#define SCTLR_CFCFG                    BIT(7)
-#define SCTLR_CFIE                     BIT(6)
-#define SCTLR_CFRE                     BIT(5)
-#define SCTLR_E                                BIT(4)
-#define SCTLR_AFE                      BIT(2)
-#define SCTLR_TRE                      BIT(1)
-#define SCTLR_M                                BIT(0)
+#define ARM_SMMU_SCTLR_S1_ASIDPNE      BIT(12)
+#define ARM_SMMU_SCTLR_CFCFG           BIT(7)
+#define ARM_SMMU_SCTLR_CFIE            BIT(6)
+#define ARM_SMMU_SCTLR_CFRE            BIT(5)
+#define ARM_SMMU_SCTLR_E               BIT(4)
+#define ARM_SMMU_SCTLR_AFE             BIT(2)
+#define ARM_SMMU_SCTLR_TRE             BIT(1)
+#define ARM_SMMU_SCTLR_M               BIT(0)
 
 #define ARM_SMMU_CB_ACTLR              0x4
 
 #define ARM_SMMU_CB_RESUME             0x8
-#define RESUME_TERMINATE               BIT(0)
+#define ARM_SMMU_RESUME_TERMINATE      BIT(0)
 
 #define ARM_SMMU_CB_TCR2               0x10
-#define TCR2_SEP                       GENMASK(17, 15)
-#define TCR2_SEP_UPSTREAM              0x7
-#define TCR2_AS                                BIT(4)
+#define ARM_SMMU_TCR2_SEP              GENMASK(17, 15)
+#define ARM_SMMU_TCR2_SEP_UPSTREAM     0x7
+#define ARM_SMMU_TCR2_AS               BIT(4)
+#define ARM_SMMU_TCR2_PASIZE           GENMASK(3, 0)
 
 #define ARM_SMMU_CB_TTBR0              0x20
 #define ARM_SMMU_CB_TTBR1              0x28
-#define TTBRn_ASID                     GENMASK_ULL(63, 48)
+#define ARM_SMMU_TTBRn_ASID            GENMASK_ULL(63, 48)
 
 #define ARM_SMMU_CB_TCR                        0x30
+#define ARM_SMMU_TCR_EAE               BIT(31)
+#define ARM_SMMU_TCR_EPD1              BIT(23)
+#define ARM_SMMU_TCR_TG0               GENMASK(15, 14)
+#define ARM_SMMU_TCR_SH0               GENMASK(13, 12)
+#define ARM_SMMU_TCR_ORGN0             GENMASK(11, 10)
+#define ARM_SMMU_TCR_IRGN0             GENMASK(9, 8)
+#define ARM_SMMU_TCR_T0SZ              GENMASK(5, 0)
+
+#define ARM_SMMU_VTCR_RES1             BIT(31)
+#define ARM_SMMU_VTCR_PS               GENMASK(18, 16)
+#define ARM_SMMU_VTCR_TG0              ARM_SMMU_TCR_TG0
+#define ARM_SMMU_VTCR_SH0              ARM_SMMU_TCR_SH0
+#define ARM_SMMU_VTCR_ORGN0            ARM_SMMU_TCR_ORGN0
+#define ARM_SMMU_VTCR_IRGN0            ARM_SMMU_TCR_IRGN0
+#define ARM_SMMU_VTCR_SL0              GENMASK(7, 6)
+#define ARM_SMMU_VTCR_T0SZ             ARM_SMMU_TCR_T0SZ
+
 #define ARM_SMMU_CB_CONTEXTIDR         0x34
 #define ARM_SMMU_CB_S1_MAIR0           0x38
 #define ARM_SMMU_CB_S1_MAIR1           0x3c
 
 #define ARM_SMMU_CB_PAR                        0x50
-#define CB_PAR_F                       BIT(0)
+#define ARM_SMMU_CB_PAR_F              BIT(0)
 
 #define ARM_SMMU_CB_FSR                        0x58
-#define FSR_MULTI                      BIT(31)
-#define FSR_SS                         BIT(30)
-#define FSR_UUT                                BIT(8)
-#define FSR_ASF                                BIT(7)
-#define FSR_TLBLKF                     BIT(6)
-#define FSR_TLBMCF                     BIT(5)
-#define FSR_EF                         BIT(4)
-#define FSR_PF                         BIT(3)
-#define FSR_AFF                                BIT(2)
-#define FSR_TF                         BIT(1)
-
-#define FSR_IGN                                (FSR_AFF | FSR_ASF | \
-                                        FSR_TLBMCF | FSR_TLBLKF)
-#define FSR_FAULT                      (FSR_MULTI | FSR_SS | FSR_UUT | \
-                                        FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
+#define ARM_SMMU_FSR_MULTI             BIT(31)
+#define ARM_SMMU_FSR_SS                        BIT(30)
+#define ARM_SMMU_FSR_UUT               BIT(8)
+#define ARM_SMMU_FSR_ASF               BIT(7)
+#define ARM_SMMU_FSR_TLBLKF            BIT(6)
+#define ARM_SMMU_FSR_TLBMCF            BIT(5)
+#define ARM_SMMU_FSR_EF                        BIT(4)
+#define ARM_SMMU_FSR_PF                        BIT(3)
+#define ARM_SMMU_FSR_AFF               BIT(2)
+#define ARM_SMMU_FSR_TF                        BIT(1)
+
+#define ARM_SMMU_FSR_IGN               (ARM_SMMU_FSR_AFF |             \
+                                        ARM_SMMU_FSR_ASF |             \
+                                        ARM_SMMU_FSR_TLBMCF |          \
+                                        ARM_SMMU_FSR_TLBLKF)
+
+#define ARM_SMMU_FSR_FAULT             (ARM_SMMU_FSR_MULTI |           \
+                                        ARM_SMMU_FSR_SS |              \
+                                        ARM_SMMU_FSR_UUT |             \
+                                        ARM_SMMU_FSR_EF |              \
+                                        ARM_SMMU_FSR_PF |              \
+                                        ARM_SMMU_FSR_TF |              \
+                                        ARM_SMMU_FSR_IGN)
 
 #define ARM_SMMU_CB_FAR                        0x60
 
 #define ARM_SMMU_CB_FSYNR0             0x68
-#define FSYNR0_WNR                     BIT(4)
+#define ARM_SMMU_FSYNR0_WNR            BIT(4)
 
 #define ARM_SMMU_CB_S1_TLBIVA          0x600
 #define ARM_SMMU_CB_S1_TLBIASID                0x610
@@ -203,7 +230,7 @@ enum arm_smmu_cbar_type {
 #define ARM_SMMU_CB_ATS1PR             0x800
 
 #define ARM_SMMU_CB_ATSR               0x8f0
-#define ATSR_ACTIVE                    BIT(0)
+#define ARM_SMMU_ATSR_ACTIVE           BIT(0)
 
 
 /* Maximum number of context banks per SMMU */
@@ -297,7 +324,7 @@ struct arm_smmu_cfg {
        enum arm_smmu_cbar_type         cbar;
        enum arm_smmu_context_fmt       fmt;
 };
-#define INVALID_IRPTNDX                        0xff
+#define ARM_SMMU_INVALID_IRPTNDX       0xff
 
 enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
@@ -318,6 +345,33 @@ struct arm_smmu_domain {
        struct iommu_domain             domain;
 };
 
+static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
+{
+       return ARM_SMMU_TCR_EPD1 |
+              FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
+              FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
+              FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
+              FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
+              FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
+}
+
+static inline u32 arm_smmu_lpae_tcr2(struct io_pgtable_cfg *cfg)
+{
+       return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) |
+              FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM);
+}
+
+static inline u32 arm_smmu_lpae_vtcr(struct io_pgtable_cfg *cfg)
+{
+       return ARM_SMMU_VTCR_RES1 |
+              FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) |
+              FIELD_PREP(ARM_SMMU_VTCR_TG0, cfg->arm_lpae_s2_cfg.vtcr.tg) |
+              FIELD_PREP(ARM_SMMU_VTCR_SH0, cfg->arm_lpae_s2_cfg.vtcr.sh) |
+              FIELD_PREP(ARM_SMMU_VTCR_ORGN0, cfg->arm_lpae_s2_cfg.vtcr.orgn) |
+              FIELD_PREP(ARM_SMMU_VTCR_IRGN0, cfg->arm_lpae_s2_cfg.vtcr.irgn) |
+              FIELD_PREP(ARM_SMMU_VTCR_SL0, cfg->arm_lpae_s2_cfg.vtcr.sl) |
+              FIELD_PREP(ARM_SMMU_VTCR_T0SZ, cfg->arm_lpae_s2_cfg.vtcr.tsz);
+}
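The three helpers above assemble the TCR, TCR2 and VTCR register values from io_pgtable_cfg by packing each field with FIELD_PREP() against the ARM_SMMU_* GENMASK definitions. As a minimal, self-contained sketch of that FIELD_PREP/FIELD_GET packing pattern — using a hypothetical register layout, not one defined by this patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 32-bit register: an 8-bit INDEX field plus a VALID bit. */
#define EXAMPLE_REG_INDEX	GENMASK(7, 0)
#define EXAMPLE_REG_VALID	BIT(31)

static inline u32 example_pack(u8 index)
{
	/* FIELD_PREP() shifts 'index' into bits 7:0 as described by the mask. */
	return FIELD_PREP(EXAMPLE_REG_INDEX, index) | EXAMPLE_REG_VALID;
}

static inline u8 example_unpack(u32 reg)
{
	/* FIELD_GET() masks and right-shifts the same field back out. */
	return FIELD_GET(EXAMPLE_REG_INDEX, reg);
}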
 
 /* Implementation details, yay! */
 struct arm_smmu_impl {
index 3acfa6a..071bb42 100644 (file)
@@ -244,7 +244,7 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
                    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
                     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
-                     info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
+                     info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
@@ -1354,7 +1354,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
        struct qi_desc desc;
 
        if (mask) {
-               WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
@@ -1371,6 +1370,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
        qi_submit_sync(&desc, iommu);
 }
 
+/* PASID-based IOTLB invalidation */
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+                    unsigned long npages, bool ih)
+{
+       struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+
+       /*
+        * npages == -1 means a PASID-selective invalidation, otherwise,
+        * a positive value for Page-selective-within-PASID invalidation.
+        * 0 is not a valid input.
+        */
+       if (WARN_ON(!npages)) {
+               pr_err("Invalid input npages = %ld\n", npages);
+               return;
+       }
+
+       if (npages == -1) {
+               desc.qw0 = QI_EIOTLB_PASID(pasid) |
+                               QI_EIOTLB_DID(did) |
+                               QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+                               QI_EIOTLB_TYPE;
+               desc.qw1 = 0;
+       } else {
+               int mask = ilog2(__roundup_pow_of_two(npages));
+               unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+               if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+                       addr &= ~(align - 1);
+
+               desc.qw0 = QI_EIOTLB_PASID(pasid) |
+                               QI_EIOTLB_DID(did) |
+                               QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+                               QI_EIOTLB_TYPE;
+               desc.qw1 = QI_EIOTLB_ADDR(addr) |
+                               QI_EIOTLB_IH(ih) |
+                               QI_EIOTLB_AM(mask);
+       }
+
+       qi_submit_sync(&desc, iommu);
+}
+
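qi_flush_piotlb() above selects the invalidation granularity from npages, as its comment describes. A hedged usage sketch follows; the wrapper function and the iommu/did/pasid/addr values are placeholders for illustration and assume the qi_flush_piotlb() declaration is exported via <linux/intel-iommu.h> elsewhere in this series:

#include <linux/intel-iommu.h>

/* Sketch only: callers would obtain iommu, did and pasid from an attached domain. */
static void example_piotlb_flush(struct intel_iommu *iommu, u16 did,
				 u32 pasid, u64 addr)
{
	/* npages == -1: PASID-selective invalidation of the whole address space. */
	qi_flush_piotlb(iommu, did, pasid, 0, -1, false);

	/* Positive npages: page-selective-within-PASID, here 16 pages at 'addr'. */
	qi_flush_piotlb(iommu, did, pasid, addr, 16, false);
}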
 /*
  * Disable Queued Invalidation interface.
  */
index 471f05d..c1257be 100644 (file)
@@ -5,6 +5,7 @@
  * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
  *         Sohil Mehta <sohil.mehta@intel.com>
  *         Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *         Lu Baolu <baolu.lu@linux.intel.com>
  */
 
 #include <linux/debugfs.h>
@@ -283,6 +284,77 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
 
+static inline unsigned long level_to_directory_size(int level)
+{
+       return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
+}
+
+static inline void
+dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
+{
+       seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
+                  iova >> VTD_PAGE_SHIFT, path[5], path[4],
+                  path[3], path[2], path[1]);
+}
+
+static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
+                              int level, unsigned long start,
+                              u64 *path)
+{
+       int i;
+
+       if (level > 5 || level < 1)
+               return;
+
+       for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
+                       i++, pde++, start += level_to_directory_size(level)) {
+               if (!dma_pte_present(pde))
+                       continue;
+
+               path[level] = pde->val;
+               if (dma_pte_superpage(pde) || level == 1)
+                       dump_page_info(m, start, path);
+               else
+                       pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
+                                          level - 1, start, path);
+               path[level] = 0;
+       }
+}
+
+static int show_device_domain_translation(struct device *dev, void *data)
+{
+       struct dmar_domain *domain = find_domain(dev);
+       struct seq_file *m = data;
+       u64 path[6] = { 0 };
+
+       if (!domain)
+               return 0;
+
+       seq_printf(m, "Device %s with pasid %d @0x%llx\n",
+                  dev_name(dev), domain->default_pasid,
+                  (u64)virt_to_phys(domain->pgd));
+       seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+
+       pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       ret = bus_for_each_dev(&pci_bus_type, NULL, m,
+                              show_device_domain_translation);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+
 #ifdef CONFIG_IRQ_REMAP
 static void ir_tbl_remap_entry_show(struct seq_file *m,
                                    struct intel_iommu *iommu)
@@ -396,6 +468,9 @@ void __init intel_iommu_debugfs_init(void)
                            &iommu_regset_fops);
        debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
                            NULL, &dmar_translation_struct_fops);
+       debugfs_create_file("domain_translation_struct", 0444,
+                           intel_iommu_debug, NULL,
+                           &domain_translation_struct_fops);
 #ifdef CONFIG_IRQ_REMAP
        debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
                            NULL, &ir_translation_struct_fops);
index 35a4a3a..9dc3767 100644 (file)
@@ -307,6 +307,20 @@ static int hw_pass_through = 1;
  */
 #define DOMAIN_FLAG_LOSE_CHILDREN              BIT(1)
 
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL            BIT(2)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE               BIT(3)
+
 #define for_each_domain_iommu(idx, domain)                     \
        for (idx = 0; idx < g_num_of_iommus; idx++)             \
                if (domain->iommu_refcnt[idx])
@@ -355,9 +369,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 int dmar_disabled = 0;
 #else
 int dmar_disabled = 1;
-#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
 
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+int intel_iommu_sm = 1;
+#else
 int intel_iommu_sm;
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
@@ -368,7 +387,6 @@ static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
 static int intel_no_bounce;
 
-#define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
 
@@ -377,7 +395,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
-static DEFINE_SPINLOCK(device_domain_lock);
+DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
 #define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&   \
@@ -552,6 +570,11 @@ static inline int domain_type_is_si(struct dmar_domain *domain)
        return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
 }
 
+static inline bool domain_use_first_level(struct dmar_domain *domain)
+{
+       return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
+}
+
 static inline int domain_pfn_supported(struct dmar_domain *domain,
                                       unsigned long pfn)
 {
@@ -661,11 +684,12 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
        return ret;
 }
 
-static int domain_update_iommu_superpage(struct intel_iommu *skip)
+static int domain_update_iommu_superpage(struct dmar_domain *domain,
+                                        struct intel_iommu *skip)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
-       int mask = 0xf;
+       int mask = 0x3;
 
        if (!intel_iommu_superpage) {
                return 0;
@@ -675,7 +699,13 @@ static int domain_update_iommu_superpage(struct intel_iommu *skip)
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
-                       mask &= cap_super_page_val(iommu->cap);
+                       if (domain && domain_use_first_level(domain)) {
+                               if (!cap_fl1gp_support(iommu->cap))
+                                       mask = 0x1;
+                       } else {
+                               mask &= cap_super_page_val(iommu->cap);
+                       }
+
                        if (!mask)
                                break;
                }
@@ -690,7 +720,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
        domain_update_iommu_coherency(domain);
        domain->iommu_snooping = domain_update_iommu_snooping(NULL);
-       domain->iommu_superpage = domain_update_iommu_superpage(NULL);
+       domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
 }
 
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -907,6 +937,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+                       if (domain_use_first_level(domain))
+                               pteval |= DMA_FL_PTE_XD;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
@@ -1477,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+                               struct dmar_domain *domain,
+                               u64 addr, unsigned long npages, bool ih)
+{
+       u16 did = domain->iommu_did[iommu->seq_id];
+
+       if (domain->default_pasid)
+               qi_flush_piotlb(iommu, did, domain->default_pasid,
+                               addr, npages, ih);
+
+       if (!list_empty(&domain->devices))
+               qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                  struct dmar_domain *domain,
                                  unsigned long pfn, unsigned int pages,
@@ -1490,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 
        if (ih)
                ih = 1 << 6;
-       /*
-        * Fallback to domain selective flush if no PSI support or the size is
-        * too big.
-        * PSI requires page size to be 2 ^ x, and the base address is naturally
-        * aligned to the size
-        */
-       if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-               iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                               DMA_TLB_DSI_FLUSH);
-       else
-               iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-                                               DMA_TLB_PSI_FLUSH);
+
+       if (domain_use_first_level(domain)) {
+               domain_flush_piotlb(iommu, domain, addr, pages, ih);
+       } else {
+               /*
+                * Fallback to domain selective flush if no PSI support or
+                * the size is too big. PSI requires page size to be 2 ^ x,
+                * and the base address is naturally aligned to the size.
+                */
+               if (!cap_pgsel_inv(iommu->cap) ||
+                   mask > cap_max_amask_val(iommu->cap))
+                       iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                                       DMA_TLB_DSI_FLUSH);
+               else
+                       iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+                                                       DMA_TLB_PSI_FLUSH);
+       }
 
        /*
         * In caching mode, changes of pages from non-present to present require
@@ -1516,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
                                        struct dmar_domain *domain,
                                        unsigned long pfn, unsigned int pages)
 {
-       /* It's a non-present to present mapping. Only flush if caching mode */
-       if (cap_caching_mode(iommu->cap))
+       /*
+        * It's a non-present to present mapping. Only flush if caching mode
+        * and second level.
+        */
+       if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
                iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
        else
                iommu_flush_write_buffer(iommu);
@@ -1534,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
                struct intel_iommu *iommu = g_iommus[idx];
                u16 did = domain->iommu_did[iommu->seq_id];
 
-               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+               if (domain_use_first_level(domain))
+                       domain_flush_piotlb(iommu, domain, 0, -1, 0);
+               else
+                       iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                                DMA_TLB_DSI_FLUSH);
 
                if (!cap_caching_mode(iommu->cap))
                        iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
@@ -1703,6 +1761,33 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 #endif
 }
 
+/*
+ * Check and return whether first level is used by default for
+ * DMA translation.
+ */
+static bool first_level_by_default(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       static int first_level_support = -1;
+
+       if (likely(first_level_support != -1))
+               return first_level_support;
+
+       first_level_support = 1;
+
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
+                       first_level_support = 0;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return first_level_support;
+}
+
 static struct dmar_domain *alloc_domain(int flags)
 {
        struct dmar_domain *domain;
@@ -1714,6 +1799,8 @@ static struct dmar_domain *alloc_domain(int flags)
        memset(domain, 0, sizeof(*domain));
        domain->nid = NUMA_NO_NODE;
        domain->flags = flags;
+       if (first_level_by_default())
+               domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
        domain->has_iotlb_device = false;
        INIT_LIST_HEAD(&domain->devices);
 
@@ -1843,14 +1930,16 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 {
        int adjust_width, agaw;
        unsigned long sagaw;
-       int err;
+       int ret;
 
        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 
-       err = init_iova_flush_queue(&domain->iovad,
-                                   iommu_flush_iova, iova_entry_free);
-       if (err)
-               return err;
+       if (!intel_iommu_strict) {
+               ret = init_iova_flush_queue(&domain->iovad,
+                                           iommu_flush_iova, iova_entry_free);
+               if (ret)
+                       pr_info("iova flush queue initialization failed\n");
+       }
 
        domain_reserve_special_ranges(domain);
 
@@ -2223,17 +2312,20 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;
+       u64 attr;
 
        BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
 
        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;
 
-       prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+       attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+       if (domain_use_first_level(domain))
+               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
 
        if (!sg) {
                sg_res = nr_pages;
-               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
        }
 
        while (nr_pages > 0) {
@@ -2245,7 +2337,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
                        sg->dma_length = sg->length;
-                       pteval = (sg_phys(sg) - pgoff) | prot;
+                       pteval = (sg_phys(sg) - pgoff) | attr;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
 
@@ -2414,7 +2506,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static struct dmar_domain *find_domain(struct device *dev)
+struct dmar_domain *find_domain(struct device *dev)
 {
        struct device_domain_info *info;
 
@@ -2460,6 +2552,36 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
        return NULL;
 }
 
+static int domain_setup_first_level(struct intel_iommu *iommu,
+                                   struct dmar_domain *domain,
+                                   struct device *dev,
+                                   int pasid)
+{
+       int flags = PASID_FLAG_SUPERVISOR_MODE;
+       struct dma_pte *pgd = domain->pgd;
+       int agaw, level;
+
+       /*
+        * Skip top levels of page tables for iommu which has
+        * less agaw than default. Unnecessary for PT mode.
+        */
+       for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+               pgd = phys_to_virt(dma_pte_addr(pgd));
+               if (!dma_pte_present(pgd))
+                       return -ENOMEM;
+       }
+
+       level = agaw_to_level(agaw);
+       if (level != 4 && level != 5)
+               return -EINVAL;
+
+       flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+
+       return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
+                                            domain->iommu_did[iommu->seq_id],
+                                            flags);
+}
+
 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                                                    int bus, int devfn,
                                                    struct device *dev,
@@ -2559,6 +2681,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                if (hw_pass_through && domain_type_is_si(domain))
                        ret = intel_pasid_setup_pass_through(iommu, domain,
                                        dev, PASID_RID2PASID);
+               else if (domain_use_first_level(domain))
+                       ret = domain_setup_first_level(iommu, domain, dev,
+                                       PASID_RID2PASID);
                else
                        ret = intel_pasid_setup_second_level(iommu, domain,
                                        dev, PASID_RID2PASID);
@@ -2764,10 +2889,8 @@ static int __init si_domain_init(int hw)
        }
 
        /*
-        * Normally we use DMA domains for devices which have RMRRs. But we
-        * loose this requirement for graphic and usb devices. Identity map
-        * the RMRRs for graphic and USB devices so that they could use the
-        * si_domain.
+        * Identity map the RMRRs so that devices with RMRRs could also use
+        * the si_domain.
         */
        for_each_rmrr_units(rmrr) {
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
@@ -2775,9 +2898,6 @@ static int __init si_domain_init(int hw)
                        unsigned long long start = rmrr->base_address;
                        unsigned long long end = rmrr->end_address;
 
-                       if (device_is_rmrr_locked(dev))
-                               continue;
-
                        if (WARN_ON(end < start ||
                                    end >> agaw_to_width(si_domain->agaw)))
                                continue;
@@ -2916,9 +3036,6 @@ static int device_def_domain_type(struct device *dev)
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
-               if (device_is_rmrr_locked(dev))
-                       return IOMMU_DOMAIN_DMA;
-
                /*
                 * Prevent any device marked as untrusted from getting
                 * placed into the statically identity mapping domain.
@@ -2956,13 +3073,9 @@ static int device_def_domain_type(struct device *dev)
                                return IOMMU_DOMAIN_DMA;
                } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
                        return IOMMU_DOMAIN_DMA;
-       } else {
-               if (device_has_rmrr(dev))
-                       return IOMMU_DOMAIN_DMA;
        }
 
-       return (iommu_identity_mapping & IDENTMAP_ALL) ?
-                       IOMMU_DOMAIN_IDENTITY : 0;
+       return 0;
 }
 
 static void intel_iommu_init_qi(struct intel_iommu *iommu)
@@ -3291,10 +3404,7 @@ static int __init init_dmars(void)
 
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
-#ifdef CONFIG_INTEL_IOMMU_SVM
-               if (pasid_supported(iommu))
-                       intel_svm_init(iommu);
-#endif
+               intel_svm_check(iommu);
        }
 
        /*
@@ -3309,9 +3419,6 @@ static int __init init_dmars(void)
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
        }
 
-       if (iommu_default_passthrough())
-               iommu_identity_mapping |= IDENTMAP_ALL;
-
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
        dmar_map_gfx = 0;
 #endif
@@ -3384,8 +3491,21 @@ static unsigned long intel_alloc_iova(struct device *dev,
 {
        unsigned long iova_pfn;
 
-       /* Restrict dma_mask to the width that the iommu can handle */
-       dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+       /*
+        * Restrict dma_mask to the width that the iommu can handle.
+        * First-level translation restricts the input address to a
+        * canonical address (i.e., address bits 63:N have the same
+        * value as address bit [N-1], where N is 48 with 4-level
+        * paging and 57 with 5-level paging). Hence, skip bit
+        * [N-1].
+        */
+       if (domain_use_first_level(domain))
+               dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
+                                dma_mask);
+       else
+               dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
+                                dma_mask);
+
        /* Ensure we reserve the whole size-aligned region */
        nrpages = __roundup_pow_of_two(nrpages);
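
A quick, hedged illustration of the arithmetic in the comment above. The sketch assumes DOMAIN_MAX_ADDR(gaw) is essentially ((1ULL << gaw) - 1); the in-tree macro works in page-frame numbers, so the exact constant differs slightly, but the bit-width reasoning is the same. With 4-level first-level paging (gaw = 48) the usable IOVA range must stay in the lower canonical half, so bit 47 is skipped:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel macro; illustrative only. */
    #define DOMAIN_MAX_ADDR(gaw)	((1ULL << (gaw)) - 1)

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    int main(void)
    {
    	uint64_t dma_mask = ~0ULL;	/* device claims 64-bit DMA */
    	int gaw = 48;			/* 4-level paging */

    	/* Second-level translation: full 48-bit input-address space. */
    	uint64_t second = min_u64(DOMAIN_MAX_ADDR(gaw), dma_mask);
    	/* First-level translation: skip bit [N-1] to stay canonical. */
    	uint64_t first = min_u64(DOMAIN_MAX_ADDR(gaw - 1), dma_mask);

    	printf("second-level limit: %#llx\n", (unsigned long long)second);
    	printf("first-level  limit: %#llx\n", (unsigned long long)first);
    	return 0;
    }

This prints 0xffffffffffff for the second-level case and 0x7fffffffffff for the first-level case, matching the gaw vs. gaw - 1 split in the hunk above.
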
 
@@ -3403,7 +3523,8 @@ static unsigned long intel_alloc_iova(struct device *dev,
        iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
                                   IOVA_PFN(dma_mask), true);
        if (unlikely(!iova_pfn)) {
-               dev_err(dev, "Allocating %ld-page iova failed", nrpages);
+               dev_err_once(dev, "Allocating %ld-page iova failed\n",
+                            nrpages);
                return 0;
        }
 
@@ -3771,8 +3892,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
                return 0;
        }
 
-       trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
-                    sg_phys(sglist), size << VTD_PAGE_SHIFT);
+       for_each_sg(sglist, sg, nelems, i)
+               trace_map_sg(dev, i + 1, nelems, sg);
 
        return nelems;
 }
@@ -3984,6 +4105,9 @@ bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
                sg_dma_len(sg) = sg->length;
        }
 
+       for_each_sg(sglist, sg, nelems, i)
+               trace_bounce_map_sg(dev, i + 1, nelems, sg);
+
        return nelems;
 
 out_unmap:
@@ -4312,16 +4436,31 @@ static void __init init_iommu_pm_ops(void)
 static inline void init_iommu_pm_ops(void) {}
 #endif /* CONFIG_PM */
 
+static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+       if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
+           !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
+           rmrr->end_address <= rmrr->base_address ||
+           arch_rmrr_sanity_check(rmrr))
+               return -EINVAL;
+
+       return 0;
+}
+
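
For context, a minimal userspace rendering of the checks added above (arch_rmrr_sanity_check() is dropped and IS_ALIGNED() is open-coded; the point is that end_address is inclusive, so it is end_address + 1 that must be page aligned):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE		4096ULL
    #define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

    /* Only the two fields the check looks at; hypothetical stand-in struct. */
    struct rmrr { uint64_t base_address, end_address; };

    static bool rmrr_ok(const struct rmrr *r)
    {
    	return IS_ALIGNED(r->base_address, PAGE_SIZE) &&
    	       IS_ALIGNED(r->end_address + 1, PAGE_SIZE) &&
    	       r->end_address > r->base_address;
    }

    int main(void)
    {
    	struct rmrr good = { 0x7c000000, 0x7c7fffff };	/* inclusive end, aligned */
    	struct rmrr bad  = { 0x7c000000, 0x7c7ff000 };	/* end + 1 not page aligned */

    	printf("good=%d bad=%d\n", rmrr_ok(&good), rmrr_ok(&bad));
    	return 0;
    }
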
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;
-       int ret;
 
        rmrr = (struct acpi_dmar_reserved_memory *)header;
-       ret = arch_rmrr_sanity_check(rmrr);
-       if (ret)
-               return ret;
+       if (rmrr_sanity_check(rmrr))
+               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+                          "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
+                          "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                          rmrr->base_address, rmrr->end_address,
+                          dmi_get_system_info(DMI_BIOS_VENDOR),
+                          dmi_get_system_info(DMI_BIOS_VERSION),
+                          dmi_get_system_info(DMI_PRODUCT_VERSION));
 
        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
@@ -4467,7 +4606,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
                        iommu->name);
                return -ENXIO;
        }
-       sp = domain_update_iommu_superpage(iommu) - 1;
+       sp = domain_update_iommu_superpage(NULL, iommu) - 1;
        if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
                pr_warn("%s: Doesn't support large page.\n",
                        iommu->name);
@@ -4487,10 +4626,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
        if (ret)
                goto out;
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
-       if (pasid_supported(iommu))
-               intel_svm_init(iommu);
-#endif
+       intel_svm_check(iommu);
 
        if (dmaru->ignored) {
                /*
@@ -4895,7 +5031,7 @@ static int __init platform_optin_force_iommu(void)
         * map for all devices except those marked as being untrusted.
         */
        if (dmar_disabled)
-               iommu_identity_mapping |= IDENTMAP_ALL;
+               iommu_set_default_passthrough(false);
 
        dmar_disabled = 0;
        no_iommu = 0;
@@ -5195,6 +5331,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
+       int ret;
 
        switch (type) {
        case IOMMU_DOMAIN_DMA:
@@ -5211,11 +5348,12 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
                        return NULL;
                }
 
-               if (type == IOMMU_DOMAIN_DMA &&
-                   init_iova_flush_queue(&dmar_domain->iovad,
-                                         iommu_flush_iova, iova_entry_free)) {
-                       pr_warn("iova flush queue initialization failed\n");
-                       intel_iommu_strict = 1;
+               if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
+                       ret = init_iova_flush_queue(&dmar_domain->iovad,
+                                                   iommu_flush_iova,
+                                                   iova_entry_free);
+                       if (ret)
+                               pr_info("iova flush queue initialization failed\n");
                }
 
                domain_update_iommu_cap(dmar_domain);
@@ -5281,7 +5419,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain,
        domain->auxd_refcnt--;
 
        if (!domain->auxd_refcnt && domain->default_pasid > 0)
-               intel_pasid_free_id(domain->default_pasid);
+               ioasid_free(domain->default_pasid);
 }
 
 static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -5299,10 +5437,11 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
        if (domain->default_pasid <= 0) {
                int pasid;
 
-               pasid = intel_pasid_alloc_id(domain, PASID_MIN,
-                                            pci_max_pasids(to_pci_dev(dev)),
-                                            GFP_KERNEL);
-               if (pasid <= 0) {
+               /* No private data needed for the default pasid */
+               pasid = ioasid_alloc(NULL, PASID_MIN,
+                                    pci_max_pasids(to_pci_dev(dev)) - 1,
+                                    NULL);
+               if (pasid == INVALID_IOASID) {
                        pr_err("Can't allocate default pasid\n");
                        return -ENODEV;
                }
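
One subtlety worth calling out (a hedged reading of the ioasid API as used here): ioasid_alloc() treats its max argument as inclusive and reports failure with INVALID_IOASID rather than a negative value, which is why the call above passes pci_max_pasids(...) - 1 and why the error check changed shape. A minimal sketch of the pattern, with a made-up helper name:

    #include <linux/ioasid.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Illustrative only; 1 stands in for PASID_MIN from intel-pasid.h. */
    static int example_alloc_pasid(u32 max_pasids)
    {
    	ioasid_t pasid = ioasid_alloc(NULL, 1, max_pasids - 1, NULL);

    	if (pasid == INVALID_IOASID)
    		return -ENOSPC;		/* allocator exhausted or unavailable */

    	return pasid;
    }
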
@@ -5320,8 +5459,12 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
                goto attach_failed;
 
        /* Setup the PASID entry for mediated devices: */
-       ret = intel_pasid_setup_second_level(iommu, domain, dev,
-                                            domain->default_pasid);
+       if (domain_use_first_level(domain))
+               ret = domain_setup_first_level(iommu, domain, dev,
+                                              domain->default_pasid);
+       else
+               ret = intel_pasid_setup_second_level(iommu, domain, dev,
+                                                    domain->default_pasid);
        if (ret)
                goto table_failed;
        spin_unlock(&iommu->lock);
@@ -5338,7 +5481,7 @@ attach_failed:
        spin_unlock(&iommu->lock);
        spin_unlock_irqrestore(&device_domain_lock, flags);
        if (!domain->auxd_refcnt && domain->default_pasid > 0)
-               intel_pasid_free_id(domain->default_pasid);
+               ioasid_free(domain->default_pasid);
 
        return ret;
 }
@@ -5592,6 +5735,24 @@ static inline bool iommu_pasid_support(void)
        return ret;
 }
 
+static inline bool nested_mode_support(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       bool ret = true;
+
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
+                       ret = false;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static bool intel_iommu_capable(enum iommu_cap cap)
 {
        if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5749,15 +5910,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
        list_add_tail(&reg->list, head);
 }
 
-static void intel_iommu_put_resv_regions(struct device *dev,
-                                        struct list_head *head)
-{
-       struct iommu_resv_region *entry, *next;
-
-       list_for_each_entry_safe(entry, next, head, list)
-               kfree(entry);
-}
-
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 {
        struct device_domain_info *info;
@@ -5984,10 +6136,42 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
        return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
 }
 
+static int
+intel_iommu_domain_set_attr(struct iommu_domain *domain,
+                           enum iommu_attr attr, void *data)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       unsigned long flags;
+       int ret = 0;
+
+       if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+               return -EINVAL;
+
+       switch (attr) {
+       case DOMAIN_ATTR_NESTING:
+               spin_lock_irqsave(&device_domain_lock, flags);
+               if (nested_mode_support() &&
+                   list_empty(&dmar_domain->devices)) {
+                       dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
+                       dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
+               } else {
+                       ret = -ENODEV;
+               }
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
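
A hedged sketch of how a caller would flip an unmanaged domain into nested mode through this new handler, via the generic iommu_domain_set_attr() wrapper present in this kernel version (the attribute must be set before any device is attached, otherwise the handler above returns -ENODEV); the helper name is made up:

    #include <linux/iommu.h>

    /* Illustrative only; mirrors what a VFIO-style consumer might do. */
    static int example_enable_nesting(struct iommu_domain *domain)
    {
    	int nesting = 1;

    	/* Only valid on IOMMU_DOMAIN_UNMANAGED domains with no devices yet. */
    	return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
    }
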
 const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
        .domain_free            = intel_iommu_domain_free,
+       .domain_set_attr        = intel_iommu_domain_set_attr,
        .attach_dev             = intel_iommu_attach_device,
        .detach_dev             = intel_iommu_detach_device,
        .aux_attach_dev         = intel_iommu_aux_attach_device,
@@ -5999,7 +6183,7 @@ const struct iommu_ops intel_iommu_ops = {
        .add_device             = intel_iommu_add_device,
        .remove_device          = intel_iommu_remove_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
-       .put_resv_regions       = intel_iommu_put_resv_regions,
+       .put_resv_regions       = generic_iommu_put_resv_regions,
        .apply_resv_region      = intel_iommu_apply_resv_region,
        .device_group           = intel_iommu_device_group,
        .dev_has_feat           = intel_iommu_dev_has_feat,
index 040a445..22b30f1 100644 (file)
  */
 static DEFINE_SPINLOCK(pasid_lock);
 u32 intel_pasid_max_id = PASID_MAX;
-static DEFINE_IDR(pasid_idr);
-
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
-{
-       int ret, min, max;
-
-       min = max_t(int, start, PASID_MIN);
-       max = min_t(int, end, intel_pasid_max_id);
-
-       WARN_ON(in_interrupt());
-       idr_preload(gfp);
-       spin_lock(&pasid_lock);
-       ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
-       spin_unlock(&pasid_lock);
-       idr_preload_end();
-
-       return ret;
-}
-
-void intel_pasid_free_id(int pasid)
-{
-       spin_lock(&pasid_lock);
-       idr_remove(&pasid_idr, pasid);
-       spin_unlock(&pasid_lock);
-}
-
-void *intel_pasid_lookup_id(int pasid)
-{
-       void *p;
-
-       spin_lock(&pasid_lock);
-       p = idr_find(&pasid_idr, pasid);
-       spin_unlock(&pasid_lock);
-
-       return p;
-}
 
 /*
  * Per device pasid table management:
@@ -465,6 +429,21 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
                devtlb_invalidation_with_pasid(iommu, dev, pasid);
 }
 
+static void pasid_flush_caches(struct intel_iommu *iommu,
+                               struct pasid_entry *pte,
+                               int pasid, u16 did)
+{
+       if (!ecap_coherent(iommu->ecap))
+               clflush_cache_range(pte, sizeof(*pte));
+
+       if (cap_caching_mode(iommu->cap)) {
+               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+               iotlb_invalidation_with_pasid(iommu, did, pasid);
+       } else {
+               iommu_flush_write_buffer(iommu);
+       }
+}
+
 /*
  * Set up the scalable mode pasid table entry for first only
  * translation type.
@@ -498,10 +477,15 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
                pasid_set_sre(pte);
        }
 
-#ifdef CONFIG_X86
-       if (cpu_feature_enabled(X86_FEATURE_LA57))
-               pasid_set_flpm(pte, 1);
-#endif /* CONFIG_X86 */
+       if (flags & PASID_FLAG_FL5LP) {
+               if (cap_5lp_support(iommu->cap)) {
+                       pasid_set_flpm(pte, 1);
+               } else {
+                       pr_err("No 5-level paging support for first-level\n");
+                       pasid_clear_entry(pte);
+                       return -EINVAL;
+               }
+       }
 
        pasid_set_domain_id(pte, did);
        pasid_set_address_width(pte, iommu->agaw);
@@ -510,16 +494,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
        /* Setup Present and PASID Granular Transfer Type: */
        pasid_set_translation_type(pte, 1);
        pasid_set_present(pte);
-
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(pte, sizeof(*pte));
-
-       if (cap_caching_mode(iommu->cap)) {
-               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-               iotlb_invalidation_with_pasid(iommu, did, pasid);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
+       pasid_flush_caches(iommu, pte, pasid, did);
 
        return 0;
 }
@@ -583,16 +558,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
         */
        pasid_set_sre(pte);
        pasid_set_present(pte);
-
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(pte, sizeof(*pte));
-
-       if (cap_caching_mode(iommu->cap)) {
-               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-               iotlb_invalidation_with_pasid(iommu, did, pasid);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
+       pasid_flush_caches(iommu, pte, pasid, did);
 
        return 0;
 }
@@ -626,16 +592,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
         */
        pasid_set_sre(pte);
        pasid_set_present(pte);
-
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(pte, sizeof(*pte));
-
-       if (cap_caching_mode(iommu->cap)) {
-               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-               iotlb_invalidation_with_pasid(iommu, did, pasid);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
+       pasid_flush_caches(iommu, pte, pasid, did);
 
        return 0;
 }
index fc8cd8f..92de6df 100644 (file)
  */
 #define PASID_FLAG_SUPERVISOR_MODE     BIT(0)
 
+/*
+ * The PASID_FLAG_FL5LP flag indicates using 5-level paging for first-
+ * level translation, otherwise, 4-level paging will be used.
+ */
+#define PASID_FLAG_FL5LP               BIT(1)
+
 struct pasid_dir_entry {
        u64 val;
 };
index dca88f9..d7f2a53 100644 (file)
 #include <linux/dmar.h>
 #include <linux/interrupt.h>
 #include <linux/mm_types.h>
+#include <linux/ioasid.h>
 #include <asm/page.h>
 
 #include "intel-pasid.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
 
-int intel_svm_init(struct intel_iommu *iommu)
-{
-       if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
-                       !cap_fl1gp_support(iommu->cap))
-               return -EINVAL;
-
-       if (cpu_feature_enabled(X86_FEATURE_LA57) &&
-                       !cap_5lp_support(iommu->cap))
-               return -EINVAL;
-
-       return 0;
-}
-
 #define PRQ_ORDER 0
 
 int intel_svm_enable_prq(struct intel_iommu *iommu)
@@ -99,6 +87,33 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
        return 0;
 }
 
+static inline bool intel_svm_capable(struct intel_iommu *iommu)
+{
+       return iommu->flags & VTD_FLAG_SVM_CAPABLE;
+}
+
+void intel_svm_check(struct intel_iommu *iommu)
+{
+       if (!pasid_supported(iommu))
+               return;
+
+       if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+           !cap_fl1gp_support(iommu->cap)) {
+               pr_err("%s SVM disabled, incompatible 1GB page capability\n",
+                      iommu->name);
+               return;
+       }
+
+       if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+           !cap_5lp_support(iommu->cap)) {
+               pr_err("%s SVM disabled, incompatible paging mode\n",
+                      iommu->name);
+               return;
+       }
+
+       iommu->flags |= VTD_FLAG_SVM_CAPABLE;
+}
+
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
                                unsigned long address, unsigned long pages, int ih)
 {
@@ -207,6 +222,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
 static DEFINE_MUTEX(pasid_mutex);
 static LIST_HEAD(global_svm_list);
 
+#define for_each_svm_dev(sdev, svm, d)                 \
+       list_for_each_entry((sdev), &(svm)->devs, list) \
+               if ((d) != (sdev)->dev) {} else
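
The trailing if (...) {} else in the macro above is what lets callers attach a normal braced body (or a single statement) that only runs for the matching device, without the macro swallowing a subsequent else. A stand-alone sketch of the same idiom with a plain linked list, just to show the expansion (names are made up):

    #include <stddef.h>

    struct node { struct node *next; const void *dev; int users; };

    /* Same shape as for_each_svm_dev(): iterate, and skip non-matching
     * entries with an empty if-body so the caller's block becomes the else. */
    #define for_each_match(p, head, want)			\
    	for ((p) = (head); (p); (p) = (p)->next)	\
    		if ((want) != (p)->dev) {} else

    static struct node *get_dev(struct node *head, const void *dev)
    {
    	struct node *p;

    	for_each_match(p, head, dev) {
    		p->users++;	/* runs only when p->dev == dev */
    		return p;
    	}
    	return NULL;
    }
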
+
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
        struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -220,6 +239,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
        if (!iommu || dmar_disabled)
                return -EINVAL;
 
+       if (!intel_svm_capable(iommu))
+               return -ENOTSUPP;
+
        if (dev_is_pci(dev)) {
                pasid_max = pci_max_pasids(to_pci_dev(dev));
                if (pasid_max < 0)
@@ -252,15 +274,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                                goto out;
                        }
 
-                       list_for_each_entry(sdev, &svm->devs, list) {
-                               if (dev == sdev->dev) {
-                                       if (sdev->ops != ops) {
-                                               ret = -EBUSY;
-                                               goto out;
-                                       }
-                                       sdev->users++;
-                                       goto success;
+                       /* Find the matching device in svm list */
+                       for_each_svm_dev(sdev, svm, dev) {
+                               if (sdev->ops != ops) {
+                                       ret = -EBUSY;
+                                       goto out;
                                }
+                               sdev->users++;
+                               goto success;
                        }
 
                        break;
@@ -314,16 +335,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                if (pasid_max > intel_pasid_max_id)
                        pasid_max = intel_pasid_max_id;
 
-               /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
-               ret = intel_pasid_alloc_id(svm,
-                                          !!cap_caching_mode(iommu->cap),
-                                          pasid_max - 1, GFP_KERNEL);
-               if (ret < 0) {
+               /* Do not use PASID 0, reserved for RID to PASID */
+               svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+                                         pasid_max - 1, svm);
+               if (svm->pasid == INVALID_IOASID) {
                        kfree(svm);
                        kfree(sdev);
+                       ret = -ENOSPC;
                        goto out;
                }
-               svm->pasid = ret;
                svm->notifier.ops = &intel_mmuops;
                svm->mm = mm;
                svm->flags = flags;
@@ -333,7 +353,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                if (mm) {
                        ret = mmu_notifier_register(&svm->notifier, mm);
                        if (ret) {
-                               intel_pasid_free_id(svm->pasid);
+                               ioasid_free(svm->pasid);
                                kfree(svm);
                                kfree(sdev);
                                goto out;
@@ -344,12 +364,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                ret = intel_pasid_setup_first_level(iommu, dev,
                                mm ? mm->pgd : init_mm.pgd,
                                svm->pasid, FLPT_DEFAULT_DID,
-                               mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+                               (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+                               (cpu_feature_enabled(X86_FEATURE_LA57) ?
+                                PASID_FLAG_FL5LP : 0));
                spin_unlock(&iommu->lock);
                if (ret) {
                        if (mm)
                                mmu_notifier_unregister(&svm->notifier, mm);
-                       intel_pasid_free_id(svm->pasid);
+                       ioasid_free(svm->pasid);
                        kfree(svm);
                        kfree(sdev);
                        goto out;
@@ -365,7 +387,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                ret = intel_pasid_setup_first_level(iommu, dev,
                                                mm ? mm->pgd : init_mm.pgd,
                                                svm->pasid, FLPT_DEFAULT_DID,
-                                               mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+                                               (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+                                               (cpu_feature_enabled(X86_FEATURE_LA57) ?
+                                               PASID_FLAG_FL5LP : 0));
                spin_unlock(&iommu->lock);
                if (ret) {
                        kfree(sdev);
@@ -397,44 +421,45 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
        if (!iommu)
                goto out;
 
-       svm = intel_pasid_lookup_id(pasid);
+       svm = ioasid_find(NULL, pasid, NULL);
        if (!svm)
                goto out;
 
-       list_for_each_entry(sdev, &svm->devs, list) {
-               if (dev == sdev->dev) {
-                       ret = 0;
-                       sdev->users--;
-                       if (!sdev->users) {
-                               list_del_rcu(&sdev->list);
-                               /* Flush the PASID cache and IOTLB for this device.
-                                * Note that we do depend on the hardware *not* using
-                                * the PASID any more. Just as we depend on other
-                                * devices never using PASIDs that they have no right
-                                * to use. We have a *shared* PASID table, because it's
-                                * large and has to be physically contiguous. So it's
-                                * hard to be as defensive as we might like. */
-                               intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-                               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
-                               kfree_rcu(sdev, rcu);
-
-                               if (list_empty(&svm->devs)) {
-                                       intel_pasid_free_id(svm->pasid);
-                                       if (svm->mm)
-                                               mmu_notifier_unregister(&svm->notifier, svm->mm);
-
-                                       list_del(&svm->list);
-
-                                       /* We mandate that no page faults may be outstanding
-                                        * for the PASID when intel_svm_unbind_mm() is called.
-                                        * If that is not obeyed, subtle errors will happen.
-                                        * Let's make them less subtle... */
-                                       memset(svm, 0x6b, sizeof(*svm));
-                                       kfree(svm);
-                               }
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
+
+       for_each_svm_dev(sdev, svm, dev) {
+               ret = 0;
+               sdev->users--;
+               if (!sdev->users) {
+                       list_del_rcu(&sdev->list);
+                       /* Flush the PASID cache and IOTLB for this device.
+                        * Note that we do depend on the hardware *not* using
+                        * the PASID any more. Just as we depend on other
+                        * devices never using PASIDs that they have no right
+                        * to use. We have a *shared* PASID table, because it's
+                        * large and has to be physically contiguous. So it's
+                        * hard to be as defensive as we might like. */
+                       intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
+                       intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+                       kfree_rcu(sdev, rcu);
+
+                       if (list_empty(&svm->devs)) {
+                               ioasid_free(svm->pasid);
+                               if (svm->mm)
+                                       mmu_notifier_unregister(&svm->notifier, svm->mm);
+                               list_del(&svm->list);
+                               /* We mandate that no page faults may be outstanding
+                                * for the PASID when intel_svm_unbind_mm() is called.
+                                * If that is not obeyed, subtle errors will happen.
+                                * Let's make them less subtle... */
+                               memset(svm, 0x6b, sizeof(*svm));
+                               kfree(svm);
                        }
-                       break;
                }
+               break;
        }
  out:
        mutex_unlock(&pasid_mutex);
@@ -454,10 +479,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
        if (!iommu)
                goto out;
 
-       svm = intel_pasid_lookup_id(pasid);
+       svm = ioasid_find(NULL, pasid, NULL);
        if (!svm)
                goto out;
 
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
        /* init_mm is used in this case */
        if (!svm->mm)
                ret = 1;
@@ -564,13 +593,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 
                if (!svm || svm->pasid != req->pasid) {
                        rcu_read_lock();
-                       svm = intel_pasid_lookup_id(req->pasid);
+                       svm = ioasid_find(NULL, req->pasid, NULL);
                        /* It *can't* go away, because the driver is not permitted
                         * to unbind the mm while any page faults are outstanding.
                         * So we only need RCU to protect the internal idr code. */
                        rcu_read_unlock();
-
-                       if (!svm) {
+                       if (IS_ERR_OR_NULL(svm)) {
                                pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
                                       iommu->name, req->pasid, ((unsigned long long *)req)[0],
                                       ((unsigned long long *)req)[1]);
@@ -654,11 +682,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        if (req->priv_data_present)
                                memcpy(&resp.qw2, req->priv_data,
                                       sizeof(req->priv_data));
+                       resp.qw2 = 0;
+                       resp.qw3 = 0;
+                       qi_submit_sync(&resp, iommu);
                }
-               resp.qw2 = 0;
-               resp.qw3 = 0;
-               qi_submit_sync(&resp, iommu);
-
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }
 
index 7c3bd2c..4272fe4 100644 (file)
 #define ARM_V7S_TTBR_IRGN_ATTR(attr)                                   \
        ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
 
-#define ARM_V7S_TCR_PD1                        BIT(5)
-
 #ifdef CONFIG_ZONE_DMA32
 #define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
 #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
@@ -798,8 +796,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
         */
        cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
 
-       /* TCR: T0SZ=0, disable TTBR1 */
-       cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;
+       /* TCR: T0SZ=0, EAE=0 (if applicable) */
+       cfg->arm_v7s_cfg.tcr = 0;
 
        /*
         * TEX remap: the indices used map to the closest equivalent types
@@ -822,15 +820,13 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();
 
-       /* TTBRs */
-       cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
-                                  ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
-                                  (cfg->coherent_walk ?
-                                  (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
-                                   ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
-                                  (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
-                                   ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
-       cfg->arm_v7s_cfg.ttbr[1] = 0;
+       /* TTBR */
+       cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
+                               (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+                                ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+                                ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+                               (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+                                ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
        return &data->iop;
 
 out_free_data:
index bdf47f7..983b084 100644 (file)
 #define ARM_LPAE_PTE_MEMATTR_DEV       (((arm_lpae_iopte)0x1) << 2)
 
 /* Register bits */
-#define ARM_32_LPAE_TCR_EAE            (1 << 31)
-#define ARM_64_LPAE_S2_TCR_RES1                (1 << 31)
+#define ARM_LPAE_TCR_TG0_4K            0
+#define ARM_LPAE_TCR_TG0_64K           1
+#define ARM_LPAE_TCR_TG0_16K           2
 
-#define ARM_LPAE_TCR_EPD1              (1 << 23)
+#define ARM_LPAE_TCR_TG1_16K           1
+#define ARM_LPAE_TCR_TG1_4K            2
+#define ARM_LPAE_TCR_TG1_64K           3
 
-#define ARM_LPAE_TCR_TG0_4K            (0 << 14)
-#define ARM_LPAE_TCR_TG0_64K           (1 << 14)
-#define ARM_LPAE_TCR_TG0_16K           (2 << 14)
-
-#define ARM_LPAE_TCR_SH0_SHIFT         12
-#define ARM_LPAE_TCR_SH0_MASK          0x3
 #define ARM_LPAE_TCR_SH_NS             0
 #define ARM_LPAE_TCR_SH_OS             2
 #define ARM_LPAE_TCR_SH_IS             3
 
-#define ARM_LPAE_TCR_ORGN0_SHIFT       10
-#define ARM_LPAE_TCR_IRGN0_SHIFT       8
-#define ARM_LPAE_TCR_RGN_MASK          0x3
 #define ARM_LPAE_TCR_RGN_NC            0
 #define ARM_LPAE_TCR_RGN_WBWA          1
 #define ARM_LPAE_TCR_RGN_WT            2
 #define ARM_LPAE_TCR_RGN_WB            3
 
-#define ARM_LPAE_TCR_SL0_SHIFT         6
-#define ARM_LPAE_TCR_SL0_MASK          0x3
+#define ARM_LPAE_VTCR_SL0_MASK         0x3
 
 #define ARM_LPAE_TCR_T0SZ_SHIFT                0
-#define ARM_LPAE_TCR_SZ_MASK           0xf
-
-#define ARM_LPAE_TCR_PS_SHIFT          16
-#define ARM_LPAE_TCR_PS_MASK           0x7
 
-#define ARM_LPAE_TCR_IPS_SHIFT         32
-#define ARM_LPAE_TCR_IPS_MASK          0x7
+#define ARM_LPAE_VTCR_PS_SHIFT         16
+#define ARM_LPAE_VTCR_PS_MASK          0x7
 
 #define ARM_LPAE_TCR_PS_32_BIT         0x0ULL
 #define ARM_LPAE_TCR_PS_36_BIT         0x1ULL
@@ -293,17 +282,11 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 {
        arm_lpae_iopte pte = prot;
 
-       if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
-               pte |= ARM_LPAE_PTE_NS;
-
        if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
                pte |= ARM_LPAE_PTE_TYPE_PAGE;
        else
                pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 
-       if (data->iop.fmt != ARM_MALI_LPAE)
-               pte |= ARM_LPAE_PTE_AF;
-       pte |= ARM_LPAE_PTE_SH_IS;
        pte |= paddr_to_iopte(paddr, data);
 
        __arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
@@ -460,9 +443,20 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
        }
 
+       if (prot & IOMMU_CACHE)
+               pte |= ARM_LPAE_PTE_SH_IS;
+       else
+               pte |= ARM_LPAE_PTE_SH_OS;
+
        if (prot & IOMMU_NOEXEC)
                pte |= ARM_LPAE_PTE_XN;
 
+       if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+               pte |= ARM_LPAE_PTE_NS;
+
+       if (data->iop.fmt != ARM_MALI_LPAE)
+               pte |= ARM_LPAE_PTE_AF;
+
        return pte;
 }
 
@@ -474,6 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
        arm_lpae_iopte *ptep = data->pgd;
        int ret, lvl = data->start_level;
        arm_lpae_iopte prot;
+       long iaext = (long)iova >> cfg->ias;
 
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
@@ -482,7 +477,9 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
                return -EINVAL;
 
-       if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+               iaext = ~iaext;
+       if (WARN_ON(iaext || paddr >> cfg->oas))
                return -ERANGE;
 
        prot = arm_lpae_prot_to_pte(data, iommu_prot);
@@ -648,11 +645,14 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
+       long iaext = (long)iova >> cfg->ias;
 
        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
                return 0;
 
-       if (WARN_ON(iova >> data->iop.cfg.ias))
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+               iaext = ~iaext;
+       if (WARN_ON(iaext))
                return 0;
 
        return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
@@ -787,9 +787,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
        u64 reg;
        struct arm_lpae_io_pgtable *data;
+       typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
+       bool tg1;
 
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
-                           IO_PGTABLE_QUIRK_NON_STRICT))
+                           IO_PGTABLE_QUIRK_NON_STRICT |
+                           IO_PGTABLE_QUIRK_ARM_TTBR1))
                return NULL;
 
        data = arm_lpae_alloc_pgtable(cfg);
@@ -798,58 +801,55 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 
        /* TCR */
        if (cfg->coherent_walk) {
-               reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+               tcr->sh = ARM_LPAE_TCR_SH_IS;
+               tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+               tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
        } else {
-               reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+               tcr->sh = ARM_LPAE_TCR_SH_OS;
+               tcr->irgn = ARM_LPAE_TCR_RGN_NC;
+               tcr->orgn = ARM_LPAE_TCR_RGN_NC;
        }
 
+       tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
        switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
-               reg |= ARM_LPAE_TCR_TG0_4K;
+               tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
                break;
        case SZ_16K:
-               reg |= ARM_LPAE_TCR_TG0_16K;
+               tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
                break;
        case SZ_64K:
-               reg |= ARM_LPAE_TCR_TG0_64K;
+               tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
                break;
        }
 
        switch (cfg->oas) {
        case 32:
-               reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
                break;
        case 36:
-               reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
                break;
        case 40:
-               reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
                break;
        case 42:
-               reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
                break;
        case 44:
-               reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
                break;
        case 48:
-               reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
                break;
        case 52:
-               reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
                break;
        default:
                goto out_free_data;
        }
 
-       reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
-
-       /* Disable speculative walks through TTBR1 */
-       reg |= ARM_LPAE_TCR_EPD1;
-       cfg->arm_lpae_s1_cfg.tcr = reg;
+       tcr->tsz = 64ULL - cfg->ias;
 
        /* MAIRs */
        reg = (ARM_LPAE_MAIR_ATTR_NC
@@ -872,9 +872,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();
 
-       /* TTBRs */
-       cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
-       cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+       /* TTBR */
+       cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
        return &data->iop;
 
 out_free_data:
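
With this change the stage-1 TCR is no longer stored as a pre-packed register value; drivers that program hardware (see the arm_smmu_lpae_tcr() call in the qcom driver further down) rebuild it from the named fields. A hedged, stand-alone sketch of that repacking, using the architectural TCR bit positions rather than the kernel's actual FIELD_PREP() macros:

    #include <stdint.h>

    /* Hypothetical mirror of the new bitfield layout in io_pgtable_cfg. */
    struct lpae_tcr {
    	unsigned int ips:3, tg:2, sh:2, orgn:2, irgn:2, tsz:6;
    };

    /* Pack into T0SZ[5:0], IRGN0[9:8], ORGN0[11:10], SH0[13:12], TG0[15:14];
     * offsets are the generic VMSAv8 ones and are illustrative only. */
    static inline uint32_t example_lpae_tcr(const struct lpae_tcr *tcr)
    {
    	return ((uint32_t)tcr->tsz  << 0)  |
    	       ((uint32_t)tcr->irgn << 8)  |
    	       ((uint32_t)tcr->orgn << 10) |
    	       ((uint32_t)tcr->sh   << 12) |
    	       ((uint32_t)tcr->tg   << 14);
    }
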
@@ -885,8 +884,9 @@ out_free_data:
 static struct io_pgtable *
 arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       u64 reg, sl;
+       u64 sl;
        struct arm_lpae_io_pgtable *data;
+       typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 
        /* The NS quirk doesn't apply at stage 2 */
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
@@ -911,55 +911,59 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
        }
 
        /* VTCR */
-       reg = ARM_64_LPAE_S2_TCR_RES1 |
-            (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-            (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-            (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+       if (cfg->coherent_walk) {
+               vtcr->sh = ARM_LPAE_TCR_SH_IS;
+               vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+               vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
+       } else {
+               vtcr->sh = ARM_LPAE_TCR_SH_OS;
+               vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
+               vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
+       }
 
        sl = data->start_level;
 
        switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
-               reg |= ARM_LPAE_TCR_TG0_4K;
+               vtcr->tg = ARM_LPAE_TCR_TG0_4K;
                sl++; /* SL0 format is different for 4K granule size */
                break;
        case SZ_16K:
-               reg |= ARM_LPAE_TCR_TG0_16K;
+               vtcr->tg = ARM_LPAE_TCR_TG0_16K;
                break;
        case SZ_64K:
-               reg |= ARM_LPAE_TCR_TG0_64K;
+               vtcr->tg = ARM_LPAE_TCR_TG0_64K;
                break;
        }
 
        switch (cfg->oas) {
        case 32:
-               reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
                break;
        case 36:
-               reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
                break;
        case 40:
-               reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
                break;
        case 42:
-               reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
                break;
        case 44:
-               reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
                break;
        case 48:
-               reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
                break;
        case 52:
-               reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
                break;
        default:
                goto out_free_data;
        }
 
-       reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
-       reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
-       cfg->arm_lpae_s2_cfg.vtcr = reg;
+       vtcr->tsz = 64ULL - cfg->ias;
+       vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
 
        /* Allocate pgd pages */
        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
@@ -982,35 +986,21 @@ out_free_data:
 static struct io_pgtable *
 arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       struct io_pgtable *iop;
-
        if (cfg->ias > 32 || cfg->oas > 40)
                return NULL;
 
        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-       iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
-       if (iop) {
-               cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
-               cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
-       }
-
-       return iop;
+       return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
 }
 
 static struct io_pgtable *
 arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       struct io_pgtable *iop;
-
        if (cfg->ias > 40 || cfg->oas > 40)
                return NULL;
 
        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-       iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
-       if (iop)
-               cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
-
-       return iop;
+       return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
 }
 
 static struct io_pgtable *
index ced53e5..94394c8 100644 (file)
@@ -63,7 +63,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
        if (!ops)
                return;
 
-       iop = container_of(ops, struct io_pgtable, ops);
+       iop = io_pgtable_ops_to_pgtable(ops);
        io_pgtable_tlb_flush_all(iop);
        io_pgtable_init_table[iop->fmt]->free(iop);
 }
index e436ff8..9986921 100644 (file)
@@ -87,6 +87,7 @@ error:
        put_device(iommu->dev);
        return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_add);
 
 void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
@@ -94,6 +95,8 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu)
        device_unregister(iommu->dev);
        iommu->dev = NULL;
 }
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_remove);
+
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
  * this interface.  A link to the device will be created in the "devices"
@@ -119,6 +122,7 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_device_link);
 
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 {
@@ -128,3 +132,4 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
        sysfs_remove_link(&link->kobj, "iommu");
        sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
 }
+EXPORT_SYMBOL_GPL(iommu_device_unlink);
index 3ead597..3e35284 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/property.h>
 #include <linux/fsl/mc.h>
+#include <linux/module.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -141,6 +142,7 @@ int iommu_device_register(struct iommu_device *iommu)
        spin_unlock(&iommu_device_lock);
        return 0;
 }
+EXPORT_SYMBOL_GPL(iommu_device_register);
 
 void iommu_device_unregister(struct iommu_device *iommu)
 {
@@ -148,6 +150,7 @@ void iommu_device_unregister(struct iommu_device *iommu)
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
 }
+EXPORT_SYMBOL_GPL(iommu_device_unregister);
 
 static struct iommu_param *iommu_get_dev_param(struct device *dev)
 {
@@ -183,10 +186,21 @@ int iommu_probe_device(struct device *dev)
        if (!iommu_get_dev_param(dev))
                return -ENOMEM;
 
+       if (!try_module_get(ops->owner)) {
+               ret = -EINVAL;
+               goto err_free_dev_param;
+       }
+
        ret = ops->add_device(dev);
        if (ret)
-               iommu_free_dev_param(dev);
+               goto err_module_put;
+
+       return 0;
 
+err_module_put:
+       module_put(ops->owner);
+err_free_dev_param:
+       iommu_free_dev_param(dev);
        return ret;
 }
 
@@ -197,7 +211,10 @@ void iommu_release_device(struct device *dev)
        if (dev->iommu_group)
                ops->remove_device(dev);
 
-       iommu_free_dev_param(dev);
+       if (dev->iommu_param) {
+               module_put(ops->owner);
+               iommu_free_dev_param(dev);
+       }
 }
 
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -887,6 +904,7 @@ struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
        kobject_get(group->devices_kobj);
        return group;
 }
+EXPORT_SYMBOL_GPL(iommu_group_ref_get);
 
 /**
  * iommu_group_put - Decrement group reference
@@ -1260,6 +1278,7 @@ struct iommu_group *generic_device_group(struct device *dev)
 {
        return iommu_group_alloc();
 }
+EXPORT_SYMBOL_GPL(generic_device_group);
 
 /*
  * Use standard PCI bus topology, isolation features, and DMA alias quirks
@@ -1327,6 +1346,7 @@ struct iommu_group *pci_device_group(struct device *dev)
        /* No shared group found, allocate new */
        return iommu_group_alloc();
 }
+EXPORT_SYMBOL_GPL(pci_device_group);
 
 /* Get the IOMMU group for device on fsl-mc bus */
 struct iommu_group *fsl_mc_device_group(struct device *dev)
@@ -1339,6 +1359,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
                group = iommu_group_alloc();
        return group;
 }
+EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 
 /**
  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
@@ -1407,6 +1428,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
        return group;
 }
+EXPORT_SYMBOL(iommu_group_get_for_dev);
 
 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
 {
@@ -1537,6 +1559,11 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 {
        int err;
 
+       if (ops == NULL) {
+               bus->iommu_ops = NULL;
+               return 0;
+       }
+
        if (bus->iommu_ops != NULL)
                return -EBUSY;
 
@@ -2230,6 +2257,25 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
                ops->put_resv_regions(dev, list);
 }
 
+/**
+ * generic_iommu_put_resv_regions - Reserved region driver helper
+ * @dev: device for which to free reserved regions
+ * @list: reserved region list for device
+ *
+ * IOMMU drivers can use this to implement their .put_resv_regions() callback
+ * for simple reservations. Memory allocated for each reserved region will be
+ * freed. If an IOMMU driver allocates additional resources per region, it is
+ * going to have to implement a custom callback.
+ */
+void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
+{
+       struct iommu_resv_region *entry, *next;
+
+       list_for_each_entry_safe(entry, next, list, list)
+               kfree(entry);
+}
+EXPORT_SYMBOL(generic_iommu_put_resv_regions);
+
 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
                                                  size_t length, int prot,
                                                  enum iommu_resv_type type)
@@ -2247,6 +2293,7 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
        region->type = type;
        return region;
 }
+EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
 
 static int
 request_default_domain_for_dev(struct device *dev, unsigned long type)
index c7a914b..0e6a953 100644 (file)
@@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
 
 struct iova *alloc_iova_mem(void)
 {
-       return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
+       return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
 }
 EXPORT_SYMBOL(alloc_iova_mem);
 
index d02edd2..ecb3f94 100644 (file)
@@ -374,7 +374,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
        u32 tmp;
 
        /* TTBR0 */
-       ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
+       ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
        ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
        ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 
index 93f14bc..94a6df1 100644 (file)
@@ -279,8 +279,8 @@ static void __program_context(void __iomem *base, int ctx,
        SET_V2PCFG(base, ctx, 0x3);
 
        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
-       SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
-       SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+       SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
+       SET_TTBR1(base, ctx, 0);
 
        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
index 6fc1f5e..95945f4 100644 (file)
@@ -367,7 +367,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
-               writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+               writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
                       data->base + REG_MMU_PT_BASE_ADDR);
        }
 
@@ -765,7 +765,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
        if (m4u_dom)
-               writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+               writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
 }
index 026ad2b..20738aa 100644 (file)
@@ -8,11 +8,12 @@
 #include <linux/export.h>
 #include <linux/iommu.h>
 #include <linux/limits.h>
-#include <linux/pci.h>
+#include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
 #include <linux/of_pci.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/fsl/mc.h>
 
@@ -91,16 +92,16 @@ static int of_iommu_xlate(struct device *dev,
 {
        const struct iommu_ops *ops;
        struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
-       int err;
+       int ret;
 
        ops = iommu_ops_from_fwnode(fwnode);
        if ((ops && !ops->of_xlate) ||
            !of_device_is_available(iommu_spec->np))
                return NO_IOMMU;
 
-       err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
-       if (err)
-               return err;
+       ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+       if (ret)
+               return ret;
        /*
         * The otherwise-empty fwspec handily serves to indicate the specific
         * IOMMU device we're waiting for, which will be useful if we ever get
@@ -109,7 +110,12 @@ static int of_iommu_xlate(struct device *dev,
        if (!ops)
                return driver_deferred_probe_check_state(dev);
 
-       return ops->of_xlate(dev, iommu_spec);
+       if (!try_module_get(ops->owner))
+               return -ENODEV;
+
+       ret = ops->of_xlate(dev, iommu_spec);
+       module_put(ops->owner);
+       return ret;
 }
 
 struct of_pci_iommu_alias_info {
@@ -179,6 +185,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                        .np = master_np,
                };
 
+               pci_request_acs();
                err = pci_for_each_dma_alias(to_pci_dev(dev),
                                             of_pci_iommu_init, &info);
        } else if (dev_is_fsl_mc(dev)) {
@@ -196,8 +203,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                        if (err)
                                break;
                }
-       }
 
+               fwspec = dev_iommu_fwspec_get(dev);
+               if (!err && fwspec)
+                       of_property_read_u32(master_np, "pasid-num-bits",
+                                            &fwspec->num_pasid_bits);
+       }
 
        /*
         * Two success conditions can be represented by non-negative err here:
index 52f3829..39759db 100644 (file)
@@ -201,7 +201,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 
        fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
 
-       if (!(fsr & FSR_FAULT))
+       if (!(fsr & ARM_SMMU_FSR_FAULT))
                return IRQ_NONE;
 
        fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
@@ -215,7 +215,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
        }
 
        iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
-       iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
+       iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
 
        return IRQ_HANDLED;
 }
@@ -269,18 +269,15 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 
                /* TTBRs */
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
-                               pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
-                               FIELD_PREP(TTBRn_ASID, ctx->asid));
-               iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
-                               pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
-                               FIELD_PREP(TTBRn_ASID, ctx->asid));
+                               pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+                               FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
+               iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);
 
                /* TCR */
                iommu_writel(ctx, ARM_SMMU_CB_TCR2,
-                               (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
-                               FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+                               arm_smmu_lpae_tcr2(&pgtbl_cfg));
                iommu_writel(ctx, ARM_SMMU_CB_TCR,
-                               pgtbl_cfg.arm_lpae_s1_cfg.tcr);
+                            arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);
 
                /* MAIRs (stage-1 only) */
                iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
@@ -289,11 +286,13 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
                                pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
 
                /* SCTLR */
-               reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
-                       SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
+               reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
+                     ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
+                     ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
+                     ARM_SMMU_SCTLR_CFCFG;
 
                if (IS_ENABLED(CONFIG_BIG_ENDIAN))
-                       reg |= SCTLR_E;
+                       reg |= ARM_SMMU_SCTLR_E;
 
                iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
 
index 315c7cc..cce329d 100644 (file)
@@ -837,14 +837,6 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
        iommu_dma_get_resv_regions(dev, head);
 }
 
-static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
-{
-       struct iommu_resv_region *entry, *next;
-
-       list_for_each_entry_safe(entry, next, head, list)
-               kfree(entry);
-}
-
 static struct iommu_ops viommu_ops;
 static struct virtio_driver virtio_iommu_drv;
 
@@ -914,7 +906,7 @@ static int viommu_add_device(struct device *dev)
 err_unlink_dev:
        iommu_device_unlink(&viommu->iommu, dev);
 err_free_dev:
-       viommu_put_resv_regions(dev, &vdev->resv_regions);
+       generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
        kfree(vdev);
 
        return ret;
@@ -932,7 +924,7 @@ static void viommu_remove_device(struct device *dev)
 
        iommu_group_remove_device(dev);
        iommu_device_unlink(&vdev->viommu->iommu, dev);
-       viommu_put_resv_regions(dev, &vdev->resv_regions);
+       generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
        kfree(vdev);
 }
 
@@ -961,7 +953,7 @@ static struct iommu_ops viommu_ops = {
        .remove_device          = viommu_remove_device,
        .device_group           = viommu_device_group,
        .get_resv_regions       = viommu_get_resv_regions,
-       .put_resv_regions       = viommu_put_resv_regions,
+       .put_resv_regions       = generic_iommu_put_resv_regions,
        .of_xlate               = viommu_of_xlate,
 };
 
index eadbe59..b5ed4ea 100644 (file)
@@ -199,8 +199,8 @@ static ssize_t empty_read(struct file *file, char __user *buf,
        return 0;
 }
 
-static const struct file_operations empty_fops = {
-       .read   = empty_read,
+static const struct proc_ops empty_proc_ops = {
+       .proc_read      = empty_read,
 };
 
 // ---------------------------------------------------------------------------
@@ -214,7 +214,7 @@ kcapi_proc_init(void)
        proc_create_seq("capi/contrstats",   0, NULL, &seq_contrstats_ops);
        proc_create_seq("capi/applications", 0, NULL, &seq_applications_ops);
        proc_create_seq("capi/applstats",    0, NULL, &seq_applstats_ops);
-       proc_create("capi/driver",           0, NULL, &empty_fops);
+       proc_create("capi/driver",           0, NULL, &empty_proc_ops);
 }
 
 void
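This hunk, and several like it below, converts a /proc registration from struct file_operations to the struct proc_ops interface added in this merge window; proc_ops has no .owner field, which is why the .owner = THIS_MODULE initializers are dropped in the conversions that follow. A minimal sketch of a proc_ops-based entry, assuming a hypothetical read-only "demo_stats" file backed by seq_file:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_stats_show(struct seq_file *m, void *v)
{
	seq_puts(m, "demo: 0 events\n");
	return 0;
}

static int demo_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_stats_show, NULL);
}

/* Same callbacks as a file_operations table, spelled with the proc_ prefix. */
static const struct proc_ops demo_stats_proc_ops = {
	.proc_open	= demo_stats_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init demo_stats_init(void)
{
	if (!proc_create("demo_stats", 0444, NULL, &demo_stats_proc_ops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_stats_exit(void)
{
	remove_proc_entry("demo_stats", NULL);
}

module_init(demo_stats_init);
module_exit(demo_stats_exit);
MODULE_LICENSE("GPL");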
index 574e122..cbd46c1 100644 (file)
@@ -178,7 +178,7 @@ config THERM_ADT746X
        depends on I2C && I2C_POWERMAC && PPC_PMAC && !PPC_PMAC64
        help
          This driver provides some thermostat and fan control for the
-          iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
+         iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
          better fan behaviour by default, and some manual control.
 
 config WINDFARM
@@ -214,7 +214,7 @@ config WINDFARM_PM91
        select I2C_POWERMAC
        help
          This driver provides thermal control for the PowerMac9,1
-          which is the recent (SMU based) single CPU desktop G5
+         which is the recent (SMU based) single CPU desktop G5
 
 config WINDFARM_PM112
        tristate "Support for thermal management on PowerMac11,2"
@@ -242,7 +242,7 @@ config PMAC_RACKMETER
        depends on PPC_PMAC
        help
          This driver provides some support to control the front panel
-          blue LEDs "vu-meter" of the XServer macs.
+         blue LEDs "vu-meter" of the XServer macs.
 
 config SENSORS_AMS
        tristate "Apple Motion Sensor driver"
index 21d532a..d38fb78 100644 (file)
@@ -212,7 +212,7 @@ static int pmu_info_proc_show(struct seq_file *m, void *v);
 static int pmu_irqstats_proc_show(struct seq_file *m, void *v);
 static int pmu_battery_proc_show(struct seq_file *m, void *v);
 static void pmu_pass_intr(unsigned char *data, int len);
-static const struct file_operations pmu_options_proc_fops;
+static const struct proc_ops pmu_options_proc_ops;
 
 #ifdef CONFIG_ADB
 const struct adb_driver via_pmu_driver = {
@@ -573,7 +573,7 @@ static int __init via_pmu_dev_init(void)
                proc_pmu_irqstats = proc_create_single("interrupts", 0,
                                proc_pmu_root, pmu_irqstats_proc_show);
                proc_pmu_options = proc_create("options", 0600, proc_pmu_root,
-                                               &pmu_options_proc_fops);
+                                               &pmu_options_proc_ops);
        }
        return 0;
 }
@@ -974,13 +974,12 @@ static ssize_t pmu_options_proc_write(struct file *file,
        return fcount;
 }
 
-static const struct file_operations pmu_options_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = pmu_options_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = pmu_options_proc_write,
+static const struct proc_ops pmu_options_proc_ops = {
+       .proc_open      = pmu_options_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = pmu_options_proc_write,
 };
 
 #ifdef CONFIG_ADB
index adf26a2..74a9849 100644 (file)
@@ -330,6 +330,9 @@ struct cached_dev {
         */
        atomic_t                has_dirty;
 
+#define BCH_CACHE_READA_ALL            0
+#define BCH_CACHE_READA_META_ONLY      1
+       unsigned int            cache_readahead_policy;
        struct bch_ratelimit    writeback_rate;
        struct delayed_work     writeback_rate_update;
 
index c71365e..a50dcfd 100644 (file)
@@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
 
 /* Bkey utility code */
 
-#define bset_bkey_last(i)      bkey_idx((struct bkey *) (i)->d, (i)->keys)
+#define bset_bkey_last(i)      bkey_idx((struct bkey *) (i)->d, \
+                                        (unsigned int)(i)->keys)
 
 static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
 {
index 33ddc52..6730820 100644 (file)
@@ -422,7 +422,8 @@ err:
 static void btree_flush_write(struct cache_set *c)
 {
        struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
-       unsigned int i, nr, ref_nr;
+       unsigned int i, nr;
+       int ref_nr;
        atomic_t *fifo_front_p, *now_fifo_front_p;
        size_t mask;
 
index 73478a9..820d840 100644 (file)
@@ -379,13 +379,20 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
                goto skip;
 
        /*
-        * Flag for bypass if the IO is for read-ahead or background,
-        * unless the read-ahead request is for metadata
+        * If the bio is for read-ahead or background IO, whether to bypass
+        * it depends on the following cases:
+        * - If the IO is for metadata, always cache it, no bypass.
+        * - Otherwise, check dc->cache_readahead_policy:
+        *      BCH_CACHE_READA_ALL: cache it, no bypass
+        *      BCH_CACHE_READA_META_ONLY: do not cache it, bypass
+        * That is, read-ahead requests for metadata always get cached
         * (eg, for gfs2 or xfs).
         */
-       if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
-           !(bio->bi_opf & (REQ_META|REQ_PRIO)))
-               goto skip;
+       if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
+               if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
+                   (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
+                       goto skip;
+       }
 
        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
index ba1c937..503aafe 100644 (file)
@@ -109,9 +109,13 @@ int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
 
 void bch_cache_accounting_clear(struct cache_accounting *acc)
 {
-       memset(&acc->total.cache_hits,
-              0,
-              sizeof(struct cache_stats));
+       acc->total.cache_hits = 0;
+       acc->total.cache_misses = 0;
+       acc->total.cache_bypass_hits = 0;
+       acc->total.cache_bypass_misses = 0;
+       acc->total.cache_readaheads = 0;
+       acc->total.cache_miss_collisions = 0;
+       acc->total.sectors_bypassed = 0;
 }
 
 void bch_cache_accounting_destroy(struct cache_accounting *acc)
index 3dea1d5..2749daf 100644 (file)
@@ -609,12 +609,13 @@ int bch_prio_write(struct cache *ca, bool wait)
        return 0;
 }
 
-static void prio_read(struct cache *ca, uint64_t bucket)
+static int prio_read(struct cache *ca, uint64_t bucket)
 {
        struct prio_set *p = ca->disk_buckets;
        struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
        struct bucket *b;
        unsigned int bucket_nr = 0;
+       int ret = -EIO;
 
        for (b = ca->buckets;
             b < ca->buckets + ca->sb.nbuckets;
@@ -627,11 +628,15 @@ static void prio_read(struct cache *ca, uint64_t bucket)
                        prio_io(ca, bucket, REQ_OP_READ, 0);
 
                        if (p->csum !=
-                           bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+                           bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
                                pr_warn("bad csum reading priorities");
+                               goto out;
+                       }
 
-                       if (p->magic != pset_magic(&ca->sb))
+                       if (p->magic != pset_magic(&ca->sb)) {
                                pr_warn("bad magic reading priorities");
+                               goto out;
+                       }
 
                        bucket = p->next_bucket;
                        d = p->data;
@@ -640,6 +645,10 @@ static void prio_read(struct cache *ca, uint64_t bucket)
                b->prio = le16_to_cpu(d->prio);
                b->gen = b->last_gc = d->gen;
        }
+
+       ret = 0;
+out:
+       return ret;
 }
 
 /* Bcache device */
@@ -1873,8 +1882,10 @@ static int run_cache_set(struct cache_set *c)
                j = &list_entry(journal.prev, struct journal_replay, list)->j;
 
                err = "IO error reading priorities";
-               for_each_cache(ca, c, i)
-                       prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
+               for_each_cache(ca, c, i) {
+                       if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+                               goto err;
+               }
 
                /*
                 * If prio_read() fails it'll call cache_set_error and we'll
index 733e2dd..3470fae 100644 (file)
@@ -27,6 +27,12 @@ static const char * const bch_cache_modes[] = {
        NULL
 };
 
+static const char * const bch_reada_cache_policies[] = {
+       "all",
+       "meta-only",
+       NULL
+};
+
 /* Default is 0 ("auto") */
 static const char * const bch_stop_on_failure_modes[] = {
        "auto",
@@ -100,6 +106,7 @@ rw_attribute(congested_write_threshold_us);
 rw_attribute(sequential_cutoff);
 rw_attribute(data_csum);
 rw_attribute(cache_mode);
+rw_attribute(readahead_cache_policy);
 rw_attribute(stop_when_cache_set_failed);
 rw_attribute(writeback_metadata);
 rw_attribute(writeback_running);
@@ -168,6 +175,11 @@ SHOW(__bch_cached_dev)
                                               bch_cache_modes,
                                               BDEV_CACHE_MODE(&dc->sb));
 
+       if (attr == &sysfs_readahead_cache_policy)
+               return bch_snprint_string_list(buf, PAGE_SIZE,
+                                             bch_reada_cache_policies,
+                                             dc->cache_readahead_policy);
+
        if (attr == &sysfs_stop_when_cache_set_failed)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_stop_on_failure_modes,
@@ -353,6 +365,15 @@ STORE(__cached_dev)
                }
        }
 
+       if (attr == &sysfs_readahead_cache_policy) {
+               v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
+               if (v < 0)
+                       return v;
+
+               if ((unsigned int) v != dc->cache_readahead_policy)
+                       dc->cache_readahead_policy = v;
+       }
+
        if (attr == &sysfs_stop_when_cache_set_failed) {
                v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
                if (v < 0)
@@ -467,6 +488,7 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_data_csum,
 #endif
        &sysfs_cache_mode,
+       &sysfs_readahead_cache_policy,
        &sysfs_stop_when_cache_set_failed,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
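The store path above maps the user-supplied policy name to an index with __sysfs_match_string() against a NULL-terminated table, mirroring the existing cache_mode and stop_when_cache_set_failed attributes. A minimal sketch of just that parsing step, with a hypothetical policy table (demo_policies and demo_parse_policy are illustrative names):

#include <linux/string.h>
#include <linux/errno.h>

static const char * const demo_policies[] = {
	"all",
	"meta-only",
	NULL			/* terminator: lets callers pass n == -1 */
};

/*
 * Map a sysfs store buffer (sysfs_streq() tolerates a trailing newline)
 * to an index into demo_policies, or return -EINVAL for unknown names.
 */
static int demo_parse_policy(const char *buf)
{
	return __sysfs_match_string(demo_policies, -1, buf);
}

The returned index (0 for "all", 1 for "meta-only") is what the cached_dev code stores in dc->cache_readahead_policy and later compares against BCH_CACHE_READA_ALL.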
index 4824d50..469f551 100644 (file)
@@ -8279,13 +8279,12 @@ static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
        return mask;
 }
 
-static const struct file_operations md_seq_fops = {
-       .owner          = THIS_MODULE,
-       .open           = md_seq_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
-       .poll           = mdstat_poll,
+static const struct proc_ops mdstat_proc_ops = {
+       .proc_open      = md_seq_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
+       .proc_poll      = mdstat_poll,
 };
 
 int register_md_personality(struct md_personality *p)
@@ -9454,7 +9453,7 @@ static void md_geninit(void)
 {
        pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
 
-       proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
+       proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
 }
 
 static int __init md_init(void)
index f048e89..0e7e277 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/cec.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
 #include <media/cec.h>
index d0c28a4..39e6116 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/kconfig.h>
 #include <linux/mfd/core.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/of_platform.h>
index 1916fa6..2d2266c 100644 (file)
@@ -11,6 +11,7 @@ config OCXL
        tristate "OpenCAPI coherent accelerator support"
        depends on PPC_POWERNV && PCI && EEH
        select OCXL_BASE
+       select HOTPLUG_PCI_POWERNV
        default m
        help
          Select this option to enable the ocxl driver for Open
index 2817f47..97b8b38 100644 (file)
@@ -255,28 +255,28 @@ static int options_open(struct inode *inode, struct file *file)
 }
 
 /* *INDENT-OFF* */
-static const struct file_operations statistics_fops = {
-       .open           = statistics_open,
-       .read           = seq_read,
-       .write          = statistics_write,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops statistics_proc_ops = {
+       .proc_open      = statistics_open,
+       .proc_read      = seq_read,
+       .proc_write     = statistics_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
-static const struct file_operations mcs_statistics_fops = {
-       .open           = mcs_statistics_open,
-       .read           = seq_read,
-       .write          = mcs_statistics_write,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops mcs_statistics_proc_ops = {
+       .proc_open      = mcs_statistics_open,
+       .proc_read      = seq_read,
+       .proc_write     = mcs_statistics_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
-static const struct file_operations options_fops = {
-       .open           = options_open,
-       .read           = seq_read,
-       .write          = options_write,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops options_proc_ops = {
+       .proc_open      = options_open,
+       .proc_read      = seq_read,
+       .proc_write     = options_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 static struct proc_dir_entry *proc_gru __read_mostly;
@@ -286,11 +286,11 @@ int gru_proc_init(void)
        proc_gru = proc_mkdir("sgi_uv/gru", NULL);
        if (!proc_gru)
                return -1;
-       if (!proc_create("statistics", 0644, proc_gru, &statistics_fops))
+       if (!proc_create("statistics", 0644, proc_gru, &statistics_proc_ops))
                goto err;
-       if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_fops))
+       if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_proc_ops))
                goto err;
-       if (!proc_create("debug_options", 0644, proc_gru, &options_fops))
+       if (!proc_create("debug_options", 0644, proc_gru, &options_proc_ops))
                goto err;
        if (!proc_create_seq("cch_status", 0444, proc_gru, &cch_seq_ops))
                goto err;
index 25fb72b..2f93c25 100644 (file)
@@ -1180,7 +1180,7 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
                 * MTD device name.
                 */
                mtd = get_mtd_device_nm(mtd_dev);
-               if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+               if (PTR_ERR(mtd) == -ENODEV)
                        /* Probably this is an MTD character device node path */
                        mtd = open_mtd_by_chdev(mtd_dev);
        } else
index 483935b..597e6fd 100644 (file)
@@ -7893,7 +7893,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
        int tcs, i;
 
        tcs = netdev_get_num_tc(dev);
-       if (tcs > 1) {
+       if (tcs) {
                int i, off, count;
 
                for (i = 0; i < tcs; i++) {
@@ -9241,6 +9241,17 @@ void bnxt_half_close_nic(struct bnxt *bp)
        bnxt_free_mem(bp, false);
 }
 
+static void bnxt_reenable_sriov(struct bnxt *bp)
+{
+       if (BNXT_PF(bp)) {
+               struct bnxt_pf_info *pf = &bp->pf;
+               int n = pf->active_vfs;
+
+               if (n)
+                       bnxt_cfg_hw_sriov(bp, &n, true);
+       }
+}
+
 static int bnxt_open(struct net_device *dev)
 {
        struct bnxt *bp = netdev_priv(dev);
@@ -9259,15 +9270,10 @@ static int bnxt_open(struct net_device *dev)
                bnxt_hwrm_if_change(bp, false);
        } else {
                if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
-                       if (BNXT_PF(bp)) {
-                               struct bnxt_pf_info *pf = &bp->pf;
-                               int n = pf->active_vfs;
-
-                               if (n)
-                                       bnxt_cfg_hw_sriov(bp, &n, true);
-                       }
-                       if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+                       if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                                bnxt_ulp_start(bp, 0);
+                               bnxt_reenable_sriov(bp);
+                       }
                }
                bnxt_hwmon_open(bp);
        }
@@ -9307,10 +9313,6 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
        bnxt_debug_dev_exit(bp);
        bnxt_disable_napi(bp);
        del_timer_sync(&bp->timer);
-       if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
-           pci_is_enabled(bp->pdev))
-               pci_disable_device(bp->pdev);
-
        bnxt_free_skbs(bp);
 
        /* Save ring stats before shutdown */
@@ -10096,9 +10098,16 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
 static void bnxt_fw_reset_close(struct bnxt *bp)
 {
        bnxt_ulp_stop(bp);
+       /* When firmware is in a fatal state, disable the PCI device to
+        * prevent any potential bad DMAs before freeing kernel memory.
+        */
+       if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+               pci_disable_device(bp->pdev);
        __bnxt_close_nic(bp, true, false);
        bnxt_clear_int_mode(bp);
        bnxt_hwrm_func_drv_unrgtr(bp);
+       if (pci_is_enabled(bp->pdev))
+               pci_disable_device(bp->pdev);
        bnxt_free_ctx_mem(bp);
        kfree(bp->ctx);
        bp->ctx = NULL;
@@ -10831,6 +10840,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                smp_mb__before_atomic();
                clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
                bnxt_ulp_start(bp, rc);
+               if (!rc)
+                       bnxt_reenable_sriov(bp);
                bnxt_dl_health_recovery_done(bp);
                bnxt_dl_health_status_update(bp, true);
                rtnl_unlock();
index 8247d21..b945bd3 100644 (file)
@@ -171,9 +171,9 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
        }
 
        msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
-       if (!msghdr) {
+       if (IS_ERR(msghdr)) {
                otx2_mbox_unlock(&pfvf->mbox);
-               return -ENOMEM;
+               return PTR_ERR(msghdr);
        }
        rsp = (struct nix_get_mac_addr_rsp *)msghdr;
        ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
index 79a2801..02526c5 100644 (file)
@@ -614,7 +614,7 @@ mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
        /* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
         * Kbits/s.
         */
-       return p->rate.rate_bytes_ps / 1000 * 8;
+       return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
 }
 
 static int
index f131ada..ce07c29 100644 (file)
@@ -866,7 +866,7 @@ struct ionic_rxq_comp {
 #define IONIC_RXQ_COMP_CSUM_F_VLAN     0x40
 #define IONIC_RXQ_COMP_CSUM_F_CALC     0x80
        u8     pkt_type_color;
-#define IONIC_RXQ_COMP_PKT_TYPE_MASK   0x0f
+#define IONIC_RXQ_COMP_PKT_TYPE_MASK   0x7f
 };
 
 enum ionic_pkt_type {
index fbfff2b..1a636ba 100644 (file)
@@ -1398,14 +1398,11 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
 {
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_pf_rt_init_params params;
-       struct qed_mcp_link_state *p_link;
        struct qed_qm_iids iids;
 
        memset(&iids, 0, sizeof(iids));
        qed_cxt_qm_iids(p_hwfn, &iids);
 
-       p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
-
        memset(&params, 0, sizeof(params));
        params.port_id = p_hwfn->port_id;
        params.pf_id = p_hwfn->rel_pf_id;
index 7912911..03bdd2e 100644 (file)
@@ -3114,6 +3114,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
                if (!p_hwfn->fw_overlay_mem) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate fw overlay memory\n");
+                       rc = -ENOMEM;
                        goto load_err;
                }
 
index ff1cbfc..5836b21 100644 (file)
@@ -4974,6 +4974,7 @@ int stmmac_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
+       u32 chan;
 
        if (!ndev || !netif_running(ndev))
                return 0;
@@ -4987,6 +4988,9 @@ int stmmac_suspend(struct device *dev)
 
        stmmac_disable_all_queues(priv);
 
+       for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+               del_timer_sync(&priv->tx_queue[chan].txtimer);
+
        /* Stop TX/RX DMA */
        stmmac_stop_all_dma(priv);
 
index 7032a24..af07ea7 100644 (file)
@@ -767,12 +767,12 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
        int i;
 
        gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
-                                      GFP_KERNEL);
+                                      GFP_KERNEL | __GFP_NOWARN);
        if (gtp->addr_hash == NULL)
                return -ENOMEM;
 
        gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
-                                     GFP_KERNEL);
+                                     GFP_KERNEL | __GFP_NOWARN);
        if (gtp->tid_hash == NULL)
                goto err1;
 
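The GTP fix above adds __GFP_NOWARN to hash-table allocations whose size comes from userspace, so an oversized request fails cleanly instead of spamming the log with allocation-failure warnings. A minimal sketch of the same pattern, with a hypothetical helper (demo_alloc_table is an illustrative name); the caller translates a NULL return into -ENOMEM as usual:

#include <linux/slab.h>
#include <linux/types.h>

/* hsize is user-controlled; suppress the allocation-failure warning. */
static struct hlist_head *demo_alloc_table(unsigned int hsize)
{
	return kmalloc_array(hsize, sizeof(struct hlist_head),
			     GFP_KERNEL | __GFP_NOWARN);
}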
index 2b74425..0b362b8 100644 (file)
@@ -218,6 +218,7 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
 {
        struct nsim_bpf_bound_prog *state;
        char name[16];
+       int ret;
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
@@ -230,9 +231,10 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
        /* Program id is not populated yet when we create the state. */
        sprintf(name, "%u", nsim_dev->prog_id_gen++);
        state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
-       if (IS_ERR_OR_NULL(state->ddir)) {
+       if (IS_ERR(state->ddir)) {
+               ret = PTR_ERR(state->ddir);
                kfree(state);
-               return -ENOMEM;
+               return ret;
        }
 
        debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
@@ -587,8 +589,8 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
 
        nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
                                                            nsim_dev->ddir);
-       if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
-               return -ENOMEM;
+       if (IS_ERR(nsim_dev->ddir_bpf_bound_progs))
+               return PTR_ERR(nsim_dev->ddir_bpf_bound_progs);
 
        nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
        err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
index 6aeed0c..7971dc4 100644 (file)
@@ -17,6 +17,7 @@
 static DEFINE_IDA(nsim_bus_dev_ids);
 static LIST_HEAD(nsim_bus_dev_list);
 static DEFINE_MUTEX(nsim_bus_dev_list_lock);
+static bool nsim_bus_enable;
 
 static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
 {
@@ -28,7 +29,7 @@ static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
 {
        nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
                                          sizeof(struct nsim_vf_config),
-                                         GFP_KERNEL);
+                                         GFP_KERNEL | __GFP_NOWARN);
        if (!nsim_bus_dev->vfconfigs)
                return -ENOMEM;
        nsim_bus_dev->num_vfs = num_vfs;
@@ -96,13 +97,25 @@ new_port_store(struct device *dev, struct device_attribute *attr,
               const char *buf, size_t count)
 {
        struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+       struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+       struct devlink *devlink;
        unsigned int port_index;
        int ret;
 
+       /* Prevent use of nsim_bus_dev before initialization. */
+       if (!smp_load_acquire(&nsim_bus_dev->init))
+               return -EBUSY;
        ret = kstrtouint(buf, 0, &port_index);
        if (ret)
                return ret;
+
+       devlink = priv_to_devlink(nsim_dev);
+
+       mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+       devlink_reload_disable(devlink);
        ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+       devlink_reload_enable(devlink);
+       mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
        return ret ? ret : count;
 }
 
@@ -113,13 +126,25 @@ del_port_store(struct device *dev, struct device_attribute *attr,
               const char *buf, size_t count)
 {
        struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+       struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+       struct devlink *devlink;
        unsigned int port_index;
        int ret;
 
+       /* Prevent use of nsim_bus_dev before initialization. */
+       if (!smp_load_acquire(&nsim_bus_dev->init))
+               return -EBUSY;
        ret = kstrtouint(buf, 0, &port_index);
        if (ret)
                return ret;
+
+       devlink = priv_to_devlink(nsim_dev);
+
+       mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+       devlink_reload_disable(devlink);
        ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+       devlink_reload_enable(devlink);
+       mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
        return ret ? ret : count;
 }
 
@@ -179,15 +204,30 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
                pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
                return -EINVAL;
        }
-       nsim_bus_dev = nsim_bus_dev_new(id, port_count);
-       if (IS_ERR(nsim_bus_dev))
-               return PTR_ERR(nsim_bus_dev);
 
        mutex_lock(&nsim_bus_dev_list_lock);
+       /* Prevent use of the resource before initialization. */
+       if (!smp_load_acquire(&nsim_bus_enable)) {
+               err = -EBUSY;
+               goto err;
+       }
+
+       nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+       if (IS_ERR(nsim_bus_dev)) {
+               err = PTR_ERR(nsim_bus_dev);
+               goto err;
+       }
+
+       /* Allow using nsim_bus_dev */
+       smp_store_release(&nsim_bus_dev->init, true);
+
        list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
        mutex_unlock(&nsim_bus_dev_list_lock);
 
        return count;
+err:
+       mutex_unlock(&nsim_bus_dev_list_lock);
+       return err;
 }
 static BUS_ATTR_WO(new_device);
 
@@ -215,6 +255,11 @@ del_device_store(struct bus_type *bus, const char *buf, size_t count)
 
        err = -ENOENT;
        mutex_lock(&nsim_bus_dev_list_lock);
+       /* Prevent use of the resource before initialization. */
+       if (!smp_load_acquire(&nsim_bus_enable)) {
+               mutex_unlock(&nsim_bus_dev_list_lock);
+               return -EBUSY;
+       }
        list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
                if (nsim_bus_dev->dev.id != id)
                        continue;
@@ -284,6 +329,9 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
        nsim_bus_dev->dev.type = &nsim_bus_dev_type;
        nsim_bus_dev->port_count = port_count;
        nsim_bus_dev->initial_net = current->nsproxy->net_ns;
+       mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
+       /* Disallow using nsim_bus_dev */
+       smp_store_release(&nsim_bus_dev->init, false);
 
        err = device_register(&nsim_bus_dev->dev);
        if (err)
@@ -299,6 +347,8 @@ err_nsim_bus_dev_free:
 
 static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
 {
+       /* Disallow using nsim_bus_dev */
+       smp_store_release(&nsim_bus_dev->init, false);
        device_unregister(&nsim_bus_dev->dev);
        ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
        kfree(nsim_bus_dev);
@@ -320,6 +370,8 @@ int nsim_bus_init(void)
        err = driver_register(&nsim_driver);
        if (err)
                goto err_bus_unregister;
+       /* Allow using resources */
+       smp_store_release(&nsim_bus_enable, true);
        return 0;
 
 err_bus_unregister:
@@ -331,12 +383,16 @@ void nsim_bus_exit(void)
 {
        struct nsim_bus_dev *nsim_bus_dev, *tmp;
 
+       /* Disallow using resources */
+       smp_store_release(&nsim_bus_enable, false);
+
        mutex_lock(&nsim_bus_dev_list_lock);
        list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
                list_del(&nsim_bus_dev->list);
                nsim_bus_dev_del(nsim_bus_dev);
        }
        mutex_unlock(&nsim_bus_dev_list_lock);
+
        driver_unregister(&nsim_driver);
        bus_unregister(&nsim_bus);
 }
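The netdevsim changes above gate the sysfs new_port/del_port/new_device/del_device handlers on flags that are published with smp_store_release() and read with smp_load_acquire(), so a store racing with bus or device setup either sees fully initialized state or returns -EBUSY. A minimal sketch of that publish/consume pattern, with hypothetical names (demo_dev, demo_dev_publish, demo_dev_use):

#include <asm/barrier.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_dev {
	unsigned int port_count;	/* written before publication */
	bool init;			/* publication flag */
};

static void demo_dev_publish(struct demo_dev *d, unsigned int ports)
{
	d->port_count = ports;
	/* Order the write above before readers can observe init == true. */
	smp_store_release(&d->init, true);
}

static int demo_dev_use(struct demo_dev *d)
{
	/* Pairs with the smp_store_release() in demo_dev_publish(). */
	if (!smp_load_acquire(&d->init))
		return -EBUSY;

	return d->port_count;
}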
index b53fbc0..5c5427c 100644 (file)
@@ -73,23 +73,26 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
 
 static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
 {
-       char dev_ddir_name[16];
+       char dev_ddir_name[sizeof(DRV_NAME) + 10];
 
        sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
        nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
-       if (IS_ERR_OR_NULL(nsim_dev->ddir))
-               return PTR_ERR_OR_ZERO(nsim_dev->ddir) ?: -EINVAL;
+       if (IS_ERR(nsim_dev->ddir))
+               return PTR_ERR(nsim_dev->ddir);
        nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
-       if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
-               return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
+       if (IS_ERR(nsim_dev->ports_ddir))
+               return PTR_ERR(nsim_dev->ports_ddir);
        debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
                            &nsim_dev->fw_update_status);
        debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
                           &nsim_dev->max_macs);
        debugfs_create_bool("test1", 0600, nsim_dev->ddir,
                            &nsim_dev->test1);
-       debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
-                           &nsim_dev_take_snapshot_fops);
+       nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+                                                     0200,
+                                                     nsim_dev->ddir,
+                                                     nsim_dev,
+                                               &nsim_dev_take_snapshot_fops);
        debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
                            &nsim_dev->dont_allow_reload);
        debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
@@ -112,8 +115,8 @@ static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
        sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
        nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
                                                 nsim_dev->ports_ddir);
-       if (IS_ERR_OR_NULL(nsim_dev_port->ddir))
-               return -ENOMEM;
+       if (IS_ERR(nsim_dev_port->ddir))
+               return PTR_ERR(nsim_dev_port->ddir);
 
        sprintf(dev_link_name, "../../../" DRV_NAME "%u",
                nsim_dev->nsim_bus_dev->dev.id);
@@ -740,6 +743,11 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
        if (err)
                goto err_health_exit;
 
+       nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+                                                     0200,
+                                                     nsim_dev->ddir,
+                                                     nsim_dev,
+                                               &nsim_dev_take_snapshot_fops);
        return 0;
 
 err_health_exit:
@@ -853,6 +861,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
 
        if (devlink_is_reload_failed(devlink))
                return;
+       debugfs_remove(nsim_dev->take_snapshot);
        nsim_dev_port_del_all(nsim_dev);
        nsim_dev_health_exit(nsim_dev);
        nsim_dev_traps_exit(devlink);
@@ -925,8 +934,8 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
 int nsim_dev_init(void)
 {
        nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
-       if (IS_ERR_OR_NULL(nsim_dev_ddir))
-               return -ENOMEM;
+       if (IS_ERR(nsim_dev_ddir))
+               return PTR_ERR(nsim_dev_ddir);
        return 0;
 }
 
index 9aa637d..ba8d9ad 100644 (file)
@@ -82,7 +82,7 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
        if (err)
                return err;
 
-       binary = kmalloc(binary_len, GFP_KERNEL);
+       binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
        if (!binary)
                return -ENOMEM;
        get_random_bytes(binary, binary_len);
@@ -285,8 +285,8 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
        }
 
        health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
-       if (IS_ERR_OR_NULL(health->ddir)) {
-               err = PTR_ERR_OR_ZERO(health->ddir) ?: -EINVAL;
+       if (IS_ERR(health->ddir)) {
+               err = PTR_ERR(health->ddir);
                goto err_dummy_reporter_destroy;
        }
 
index 94df795..2eb7b0d 100644 (file)
@@ -160,6 +160,7 @@ struct nsim_dev {
        struct nsim_trap_data *trap_data;
        struct dentry *ddir;
        struct dentry *ports_ddir;
+       struct dentry *take_snapshot;
        struct bpf_offload_dev *bpf_dev;
        bool bpf_bind_accept;
        u32 bpf_bind_verifier_delay;
@@ -240,6 +241,9 @@ struct nsim_bus_dev {
                                  */
        unsigned int num_vfs;
        struct nsim_vf_config *vfconfigs;
+       /* Lock for devlink->reload_enabled in netdevsim module */
+       struct mutex nsim_bus_reload_lock;
+       bool init;
 };
 
 int nsim_bus_init(void);
diff --git a/drivers/net/netdevsim/sdev.c b/drivers/net/netdevsim/sdev.c
deleted file mode 100644 (file)
index 6712da3..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
-
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include "netdevsim.h"
-
-static struct dentry *nsim_sdev_ddir;
-
-static u32 nsim_sdev_id;
-
-struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns)
-{
-       struct netdevsim_shared_dev *sdev;
-       char sdev_ddir_name[10];
-       int err;
-
-       if (joinns) {
-               if (WARN_ON(!joinns->sdev))
-                       return ERR_PTR(-EINVAL);
-               sdev = joinns->sdev;
-               sdev->refcnt++;
-               return sdev;
-       }
-
-       sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
-       if (!sdev)
-               return ERR_PTR(-ENOMEM);
-       sdev->refcnt = 1;
-       sdev->switch_id = nsim_sdev_id++;
-
-       sprintf(sdev_ddir_name, "%u", sdev->switch_id);
-       sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir);
-       if (IS_ERR_OR_NULL(sdev->ddir)) {
-               err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL;
-               goto err_sdev_free;
-       }
-
-       return sdev;
-
-err_sdev_free:
-       nsim_sdev_id--;
-       kfree(sdev);
-       return ERR_PTR(err);
-}
-
-void nsim_sdev_put(struct netdevsim_shared_dev *sdev)
-{
-       if (--sdev->refcnt)
-               return;
-       debugfs_remove_recursive(sdev->ddir);
-       kfree(sdev);
-}
-
-int nsim_sdev_init(void)
-{
-       nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
-       if (IS_ERR_OR_NULL(nsim_sdev_ddir))
-               return -ENOMEM;
-       return 0;
-}
-
-void nsim_sdev_exit(void)
-{
-       debugfs_remove_recursive(nsim_sdev_ddir);
-}
index aee6261..481cf48 100644 (file)
@@ -489,6 +489,14 @@ static int at803x_probe(struct phy_device *phydev)
        return at803x_parse_dt(phydev);
 }
 
+static void at803x_remove(struct phy_device *phydev)
+{
+       struct at803x_priv *priv = phydev->priv;
+
+       if (priv->vddio)
+               regulator_disable(priv->vddio);
+}
+
 static int at803x_clk_out_config(struct phy_device *phydev)
 {
        struct at803x_priv *priv = phydev->priv;
@@ -711,6 +719,7 @@ static struct phy_driver at803x_driver[] = {
        .name                   = "Qualcomm Atheros AR8035",
        .phy_id_mask            = AT803X_PHY_ID_MASK,
        .probe                  = at803x_probe,
+       .remove                 = at803x_remove,
        .config_init            = at803x_config_init,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
@@ -726,6 +735,7 @@ static struct phy_driver at803x_driver[] = {
        .name                   = "Qualcomm Atheros AR8030",
        .phy_id_mask            = AT803X_PHY_ID_MASK,
        .probe                  = at803x_probe,
+       .remove                 = at803x_remove,
        .config_init            = at803x_config_init,
        .link_change_notify     = at803x_link_change_notify,
        .set_wol                = at803x_set_wol,
@@ -741,6 +751,7 @@ static struct phy_driver at803x_driver[] = {
        .name                   = "Qualcomm Atheros AR8031/AR8033",
        .phy_id_mask            = AT803X_PHY_ID_MASK,
        .probe                  = at803x_probe,
+       .remove                 = at803x_remove,
        .config_init            = at803x_config_init,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
index 7a9ad54..bf86c9c 100644 (file)
@@ -123,7 +123,7 @@ static int g12a_ephy_pll_is_enabled(struct clk_hw *hw)
        return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0;
 }
 
-static void g12a_ephy_pll_init(struct clk_hw *hw)
+static int g12a_ephy_pll_init(struct clk_hw *hw)
 {
        struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
 
@@ -136,6 +136,8 @@ static void g12a_ephy_pll_init(struct clk_hw *hw)
        writel(0x20200000, pll->base + ETH_PLL_CTL5);
        writel(0x0000c002, pll->base + ETH_PLL_CTL6);
        writel(0x00000023, pll->base + ETH_PLL_CTL7);
+
+       return 0;
 }
 
 static const struct clk_ops g12a_ephy_pll_ops = {
index 2f12c5d..b71b745 100644 (file)
@@ -111,6 +111,13 @@ void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
        struct mii_timestamping_desc *desc;
        struct list_head *this;
 
+       /* A mii_timestamper statically registered by the PHY driver won't use
+        * register_mii_timestamper() and thus doesn't have ->device set. Don't
+        * try to unregister these.
+        */
+       if (!mii_ts->device)
+               return;
+
        mutex_lock(&tstamping_devices_lock);
        list_for_each(this, &mii_timestamping_devices) {
                desc = list_entry(this, struct mii_timestamping_desc, list);
index e8cd8c0..78ddbaf 100644 (file)
@@ -698,6 +698,9 @@ enum rtl8152_flags {
 #define VENDOR_ID_NVIDIA               0x0955
 #define VENDOR_ID_TPLINK               0x2357
 
+#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2      0x3082
+#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2             0xa387
+
 #define MCU_TYPE_PLA                   0x0100
 #define MCU_TYPE_USB                   0x0000
 
@@ -6759,9 +6762,13 @@ static int rtl8152_probe(struct usb_interface *intf,
                netdev->hw_features &= ~NETIF_F_RXCSUM;
        }
 
-       if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
-           le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
-               set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+       if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+               switch (le16_to_cpu(udev->descriptor.idProduct)) {
+               case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+               case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+                       set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+               }
+       }
 
        if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
            (!strcmp(udev->serial, "000001000000") ||
index c4c8f1b..8363f91 100644 (file)
@@ -4420,73 +4420,65 @@ static int proc_BSSList_open( struct inode *inode, struct file *file );
 static int proc_config_open( struct inode *inode, struct file *file );
 static int proc_wepkey_open( struct inode *inode, struct file *file );
 
-static const struct file_operations proc_statsdelta_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .open           = proc_statsdelta_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_statsdelta_ops = {
+       .proc_read      = proc_read,
+       .proc_open      = proc_statsdelta_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
-static const struct file_operations proc_stats_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .open           = proc_stats_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_stats_ops = {
+       .proc_read      = proc_read,
+       .proc_open      = proc_stats_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
-static const struct file_operations proc_status_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .open           = proc_status_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_status_ops = {
+       .proc_read      = proc_read,
+       .proc_open      = proc_status_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
-static const struct file_operations proc_SSID_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .write          = proc_write,
-       .open           = proc_SSID_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_SSID_ops = {
+       .proc_read      = proc_read,
+       .proc_write     = proc_write,
+       .proc_open      = proc_SSID_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
-static const struct file_operations proc_BSSList_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .write          = proc_write,
-       .open           = proc_BSSList_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_BSSList_ops = {
+       .proc_read      = proc_read,
+       .proc_write     = proc_write,
+       .proc_open      = proc_BSSList_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
-static const struct file_operations proc_APList_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .write          = proc_write,
-       .open           = proc_APList_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_APList_ops = {
+       .proc_read      = proc_read,
+       .proc_write     = proc_write,
+       .proc_open      = proc_APList_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
-static const struct file_operations proc_config_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .write          = proc_write,
-       .open           = proc_config_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_config_ops = {
+       .proc_read      = proc_read,
+       .proc_write     = proc_write,
+       .proc_open      = proc_config_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
-static const struct file_operations proc_wepkey_ops = {
-       .owner          = THIS_MODULE,
-       .read           = proc_read,
-       .write          = proc_write,
-       .open           = proc_wepkey_open,
-       .release        = proc_close,
-       .llseek         = default_llseek,
+static const struct proc_ops proc_wepkey_ops = {
+       .proc_read      = proc_read,
+       .proc_write     = proc_write,
+       .proc_open      = proc_wepkey_open,
+       .proc_release   = proc_close,
+       .proc_lseek     = default_llseek,
 };
 
 static struct proc_dir_entry *airo_entry;
index 436b819..43bab92 100644 (file)
@@ -240,13 +240,12 @@ static ssize_t debug_level_proc_write(struct file *file,
        return strnlen(buf, len);
 }
 
-static const struct file_operations debug_level_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = debug_level_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = debug_level_proc_write,
+static const struct proc_ops debug_level_proc_ops = {
+       .proc_open      = debug_level_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = debug_level_proc_write,
 };
 #endif                         /* CONFIG_LIBIPW_DEBUG */
 
@@ -263,7 +262,7 @@ static int __init libipw_init(void)
                return -EIO;
        }
        e = proc_create("debug_level", 0644, libipw_proc,
-                       &debug_level_proc_fops);
+                       &debug_level_proc_ops);
        if (!e) {
                remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
                libipw_proc = NULL;
index e323e9a..58212c5 100644 (file)
@@ -126,7 +126,7 @@ static void prism2_check_sta_fw_version(local_info_t *local);
 
 #ifdef PRISM2_DOWNLOAD_SUPPORT
 /* hostap_download.c */
-static const struct file_operations prism2_download_aux_dump_proc_fops;
+static const struct proc_ops prism2_download_aux_dump_proc_ops;
 static u8 * prism2_read_pda(struct net_device *dev);
 static int prism2_download(local_info_t *local,
                           struct prism2_download_param *param);
@@ -3094,7 +3094,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
        local->func->reset_port = prism2_reset_port;
        local->func->schedule_reset = prism2_schedule_reset;
 #ifdef PRISM2_DOWNLOAD_SUPPORT
-       local->func->read_aux_fops = &prism2_download_aux_dump_proc_fops;
+       local->func->read_aux_proc_ops = &prism2_download_aux_dump_proc_ops;
        local->func->download = prism2_download;
 #endif /* PRISM2_DOWNLOAD_SUPPORT */
        local->func->tx = prism2_tx_80211;
index 6151d8d..a2ee469 100644 (file)
@@ -211,9 +211,9 @@ static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
        return count;
 }
 
-static const struct file_operations prism2_pda_proc_fops = {
-       .read           = prism2_pda_proc_read,
-       .llseek         = generic_file_llseek,
+static const struct proc_ops prism2_pda_proc_ops = {
+       .proc_read      = prism2_pda_proc_read,
+       .proc_lseek     = generic_file_llseek,
 };
 
 
@@ -223,8 +223,8 @@ static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf,
        return 0;
 }
 
-static const struct file_operations prism2_aux_dump_proc_fops = {
-       .read           = prism2_aux_dump_proc_no_read,
+static const struct proc_ops prism2_aux_dump_proc_ops = {
+       .proc_read      = prism2_aux_dump_proc_no_read,
 };
 
 
@@ -379,9 +379,9 @@ void hostap_init_proc(local_info_t *local)
        proc_create_seq_data("wds", 0, local->proc,
                        &prism2_wds_proc_seqops, local);
        proc_create_data("pda", 0, local->proc,
-                        &prism2_pda_proc_fops, local);
+                        &prism2_pda_proc_ops, local);
        proc_create_data("aux_dump", 0, local->proc,
-                        local->func->read_aux_fops ?: &prism2_aux_dump_proc_fops,
+                        local->func->read_aux_proc_ops ?: &prism2_aux_dump_proc_ops,
                         local);
        proc_create_seq_data("bss_list", 0, local->proc,
                        &prism2_bss_list_proc_seqops, local);
index a8c4c1a..487883f 100644 (file)
@@ -599,7 +599,7 @@ struct prism2_helper_functions {
                        struct prism2_download_param *param);
        int (*tx)(struct sk_buff *skb, struct net_device *dev);
        int (*set_tim)(struct net_device *dev, int aid, int set);
-       const struct file_operations *read_aux_fops;
+       const struct proc_ops *read_aux_proc_ops;
 
        int need_tx_headroom; /* number of bytes of headroom needed before
                               * IEEE 802.11 header */
index cf37268..c1d542b 100644 (file)
@@ -2717,10 +2717,9 @@ static ssize_t ray_cs_essid_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations ray_cs_essid_proc_fops = {
-       .owner          = THIS_MODULE,
-       .write          = ray_cs_essid_proc_write,
-       .llseek         = noop_llseek,
+static const struct proc_ops ray_cs_essid_proc_ops = {
+       .proc_write     = ray_cs_essid_proc_write,
+       .proc_lseek     = noop_llseek,
 };
 
 static ssize_t int_proc_write(struct file *file, const char __user *buffer,
@@ -2751,10 +2750,9 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
        return count;
 }
 
-static const struct file_operations int_proc_fops = {
-       .owner          = THIS_MODULE,
-       .write          = int_proc_write,
-       .llseek         = noop_llseek,
+static const struct proc_ops int_proc_ops = {
+       .proc_write     = int_proc_write,
+       .proc_lseek     = noop_llseek,
 };
 #endif
 
@@ -2790,10 +2788,10 @@ static int __init init_ray_cs(void)
        proc_mkdir("driver/ray_cs", NULL);
 
        proc_create_single("driver/ray_cs/ray_cs", 0, NULL, ray_cs_proc_show);
-       proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_fops);
-       proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_fops,
+       proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_ops);
+       proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_ops,
                         &net_type);
-       proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_fops,
+       proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_ops,
                         &translate);
 #endif
        if (translate != 0)
index 365a2dd..da392b5 100644 (file)
@@ -167,7 +167,6 @@ struct nvme_queue {
         /* only used for poll queues: */
        spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
        volatile struct nvme_completion *cqes;
-       struct blk_mq_tags **tags;
        dma_addr_t sq_dma_addr;
        dma_addr_t cq_dma_addr;
        u32 __iomem *q_db;
@@ -376,29 +375,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
        WARN_ON(hctx_idx != 0);
        WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
-       WARN_ON(nvmeq->tags);
 
        hctx->driver_data = nvmeq;
-       nvmeq->tags = &dev->admin_tagset.tags[0];
        return 0;
 }
 
-static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
-       struct nvme_queue *nvmeq = hctx->driver_data;
-
-       nvmeq->tags = NULL;
-}
-
 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int hctx_idx)
 {
        struct nvme_dev *dev = data;
        struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
-       if (!nvmeq->tags)
-               nvmeq->tags = &dev->tagset.tags[hctx_idx];
-
        WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
        hctx->driver_data = nvmeq;
        return 0;
@@ -948,6 +935,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
                writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 }
 
+static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
+{
+       if (!nvmeq->qid)
+               return nvmeq->dev->admin_tagset.tags[0];
+       return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
+}
+
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
        volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
@@ -972,7 +966,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
                return;
        }
 
-       req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
        trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
        nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -1572,7 +1566,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
        .queue_rq       = nvme_queue_rq,
        .complete       = nvme_pci_complete_rq,
        .init_hctx      = nvme_admin_init_hctx,
-       .exit_hctx      = nvme_admin_exit_hctx,
        .init_request   = nvme_init_request,
        .timeout        = nvme_timeout,
 };
index 28438b8..576de77 100644 (file)
@@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
        return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
 }
 
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
-{
-       struct nvmet_req *req;
-
-       while (1) {
-               mutex_lock(&ctrl->lock);
-               if (!ctrl->nr_async_event_cmds) {
-                       mutex_unlock(&ctrl->lock);
-                       return;
-               }
-
-               req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-               mutex_unlock(&ctrl->lock);
-               nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
-       }
-}
-
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
 {
-       struct nvmet_ctrl *ctrl =
-               container_of(work, struct nvmet_ctrl, async_event_work);
        struct nvmet_async_event *aen;
        struct nvmet_req *req;
 
@@ -159,18 +140,41 @@ static void nvmet_async_event_work(struct work_struct *work)
                                struct nvmet_async_event, entry);
                if (!aen || !ctrl->nr_async_event_cmds) {
                        mutex_unlock(&ctrl->lock);
-                       return;
+                       break;
                }
 
                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-               nvmet_set_result(req, nvmet_async_event_result(aen));
+               if (status == 0)
+                       nvmet_set_result(req, nvmet_async_event_result(aen));
 
                list_del(&aen->entry);
                kfree(aen);
 
                mutex_unlock(&ctrl->lock);
-               nvmet_req_complete(req, 0);
+               nvmet_req_complete(req, status);
+       }
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+       struct nvmet_req *req;
+
+       mutex_lock(&ctrl->lock);
+       while (ctrl->nr_async_event_cmds) {
+               req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+               mutex_unlock(&ctrl->lock);
+               nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+               mutex_lock(&ctrl->lock);
        }
+       mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+       struct nvmet_ctrl *ctrl =
+               container_of(work, struct nvmet_ctrl, async_event_work);
+
+       nvmet_async_events_process(ctrl, 0);
 }
 
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -555,7 +559,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
        } else {
                struct nvmet_ns *old;
 
-               list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
+               list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
+                                       lockdep_is_held(&subsys->lock)) {
                        BUG_ON(ns->nsid == old->nsid);
                        if (ns->nsid < old->nsid)
                                break;
@@ -752,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
 
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
+       u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+       struct nvmet_ctrl *ctrl = sq->ctrl;
+
        /*
         * If this is the admin queue, complete all AERs so that our
         * queue doesn't have outstanding requests on it.
         */
-       if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
-               nvmet_async_events_free(sq->ctrl);
+       if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
+               nvmet_async_events_process(ctrl, status);
+               nvmet_async_events_free(ctrl);
+       }
        percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
        wait_for_completion(&sq->confirm_done);
        wait_for_completion(&sq->free_done);
        percpu_ref_exit(&sq->ref);
 
-       if (sq->ctrl) {
-               nvmet_ctrl_put(sq->ctrl);
+       if (ctrl) {
+               nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
 }
@@ -938,6 +948,17 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+       if (unlikely(data_len > req->transfer_len)) {
+               req->error_loc = offsetof(struct nvme_common_command, dptr);
+               nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+               return false;
+       }
+
+       return true;
+}
+
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
        struct pci_dev *p2p_dev = NULL;
@@ -1172,7 +1193,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
 
        ctrl->p2p_client = get_device(req->p2p_client);
 
-       list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+       list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
+                               lockdep_is_held(&ctrl->subsys->lock))
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
index f729747..feef15c 100644 (file)
@@ -109,6 +109,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
        u16 qid = le16_to_cpu(c->qid);
        u16 sqsize = le16_to_cpu(c->sqsize);
        struct nvmet_ctrl *old;
+       u16 ret;
 
        old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
        if (old) {
@@ -119,7 +120,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
        if (!sqsize) {
                pr_warn("queue size zero!\n");
                req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
-               return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+               ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+               goto err;
        }
 
        /* note: convert queue size from 0's-based value to 1's-based value */
@@ -132,16 +134,19 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
        }
 
        if (ctrl->ops->install_queue) {
-               u16 ret = ctrl->ops->install_queue(req->sq);
-
+               ret = ctrl->ops->install_queue(req->sq);
                if (ret) {
                        pr_err("failed to install queue %d cntlid %d ret %x\n",
-                               qid, ret, ctrl->cntlid);
-                       return ret;
+                               qid, ctrl->cntlid, ret);
+                       goto err;
                }
        }
 
        return 0;
+
+err:
+       req->sq->ctrl = NULL;
+       return ret;
 }
 
 static void nvmet_execute_admin_connect(struct nvmet_req *req)
index b6fca0e..ea0e596 100644 (file)
@@ -280,7 +280,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
 
 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
-       if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+       if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                return;
 
        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
index caebfce..cd5670b 100644 (file)
@@ -336,7 +336,7 @@ static void nvmet_file_dsm_work(struct work_struct *w)
 
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
-       if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+       if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                return;
        INIT_WORK(&req->f.work, nvmet_file_dsm_work);
        schedule_work(&req->f.work);
index 46df45e..eda28b2 100644 (file)
@@ -374,6 +374,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
index e9127db..27203bf 100644 (file)
@@ -161,7 +161,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
                coherent ? " " : " not ");
 
        iommu = of_iommu_configure(dev, np);
-       if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+       if (PTR_ERR(iommu) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        dev_dbg(dev, "device is%sbehind an iommu\n",
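
A note on the pattern in this hunk (it recurs in the phy-core and phy_optional hunks further down): IS_ERR() is implied whenever PTR_ERR() compares equal to a specific errno, because PTR_ERR() of a valid pointer (or of NULL) is just that value cast to long and never lands in the small negative errno range. A minimal sketch of the idiom; example_get_resource() is an invented helper that, like of_iommu_configure(), may return a pointer, NULL or an ERR_PTR:

#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper, used only for illustration. */
extern void *example_get_resource(struct device *dev);

static int example_configure(struct device *dev)
{
	void *res = example_get_resource(dev);

	/*
	 * PTR_ERR(res) can only equal -EPROBE_DEFER when res is
	 * ERR_PTR(-EPROBE_DEFER), so an extra IS_ERR() test adds nothing.
	 */
	if (PTR_ERR(res) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	return 0;
}
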
index f5c2a54..8270bbf 100644 (file)
@@ -81,13 +81,15 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
        else
                phy = get_phy_device(mdio, addr, is_c45);
        if (IS_ERR(phy)) {
-               unregister_mii_timestamper(mii_ts);
+               if (mii_ts)
+                       unregister_mii_timestamper(mii_ts);
                return PTR_ERR(phy);
        }
 
        rc = of_irq_get(child, 0);
        if (rc == -EPROBE_DEFER) {
-               unregister_mii_timestamper(mii_ts);
+               if (mii_ts)
+                       unregister_mii_timestamper(mii_ts);
                phy_device_free(phy);
                return rc;
        }
@@ -116,12 +118,19 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
         * register it */
        rc = phy_device_register(phy);
        if (rc) {
-               unregister_mii_timestamper(mii_ts);
+               if (mii_ts)
+                       unregister_mii_timestamper(mii_ts);
                phy_device_free(phy);
                of_node_put(child);
                return rc;
        }
-       phy->mii_ts = mii_ts;
+
+       /* phy->mii_ts may already be defined by the PHY driver. A
+        * mii_timestamper probed via the device tree will still have
+        * precedence.
+        */
+       if (mii_ts)
+               phy->mii_ts = mii_ts;
 
        dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
                child, addr);
index eda2633..9210a95 100644 (file)
@@ -32,7 +32,7 @@
 
 #define OP_BUFFER_FLAGS        0
 
-static struct ring_buffer *op_ring_buffer;
+static struct trace_buffer *op_ring_buffer;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
index 73e37bb..36c6613 100644 (file)
@@ -230,13 +230,12 @@ parse_error:
        return -EINVAL;
 }
 
-static const struct file_operations led_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = led_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = led_proc_write,
+static const struct proc_ops led_proc_ops = {
+       .proc_open      = led_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = led_proc_write,
 };
 
 static int __init led_create_procfs(void)
@@ -252,14 +251,14 @@ static int __init led_create_procfs(void)
        if (!lcd_no_led_support)
        {
                ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
-                                       &led_proc_fops, (void *)LED_NOLCD); /* LED */
+                                       &led_proc_ops, (void *)LED_NOLCD); /* LED */
                if (!ent) return -1;
        }
 
        if (led_type == LED_HASLCD)
        {
                ent = proc_create_data("lcd", S_IRUGO|S_IWUSR, proc_pdc_root,
-                                       &led_proc_fops, (void *)LED_HASLCD); /* LCD */
+                                       &led_proc_ops, (void *)LED_HASLCD); /* LCD */
                if (!ent) return -1;
        }
 
index b6f064c..3ef0bb2 100644 (file)
@@ -69,6 +69,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
        dev->ats_enabled = 1;
        return 0;
 }
+EXPORT_SYMBOL_GPL(pci_enable_ats);
 
 /**
  * pci_disable_ats - disable the ATS capability
@@ -87,6 +88,7 @@ void pci_disable_ats(struct pci_dev *dev)
 
        dev->ats_enabled = 0;
 }
+EXPORT_SYMBOL_GPL(pci_disable_ats);
 
 void pci_restore_ats_state(struct pci_dev *dev)
 {
index ac93f5a..0e03cef 100644 (file)
@@ -1406,7 +1406,7 @@ static struct phy *devm_of_phy_optional_get_index(struct device *dev,
        phy = devm_of_phy_get(dev, np, name);
        kfree(name);
 
-       if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
+       if (PTR_ERR(phy) == -ENODEV)
                phy = NULL;
 
        return phy;
index d7b2b47..0456516 100644 (file)
@@ -18,6 +18,9 @@
 #define DRIVER_AUTHOR  "Gavin Shan, IBM Corporation"
 #define DRIVER_DESC    "PowerPC PowerNV PCI Hotplug Driver"
 
+#define SLOT_WARN(sl, x...) \
+       ((sl)->pdev ? pci_warn((sl)->pdev, x) : dev_warn(&(sl)->bus->dev, x))
+
 struct pnv_php_event {
        bool                    added;
        struct pnv_php_slot     *php_slot;
@@ -151,17 +154,11 @@ static void pnv_php_rmv_pdns(struct device_node *dn)
 static void pnv_php_detach_device_nodes(struct device_node *parent)
 {
        struct device_node *dn;
-       int refcount;
 
        for_each_child_of_node(parent, dn) {
                pnv_php_detach_device_nodes(dn);
 
                of_node_put(dn);
-               refcount = kref_read(&dn->kobj.kref);
-               if (refcount != 1)
-                       pr_warn("Invalid refcount %d on <%pOF>\n",
-                               refcount, dn);
-
                of_detach_node(dn);
        }
 }
@@ -271,7 +268,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
 
        ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
        if (ret) {
-               pci_warn(php_slot->pdev, "Error %d getting FDT blob\n", ret);
+               SLOT_WARN(php_slot, "Error %d getting FDT blob\n", ret);
                goto free_fdt1;
        }
 
@@ -285,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
        dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
        if (!dt) {
                ret = -EINVAL;
-               pci_warn(php_slot->pdev, "Cannot unflatten FDT\n");
+               SLOT_WARN(php_slot, "Cannot unflatten FDT\n");
                goto free_fdt;
        }
 
@@ -295,15 +292,15 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
        ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
        if (ret) {
                pnv_php_reverse_nodes(php_slot->dn);
-               pci_warn(php_slot->pdev, "Error %d populating changeset\n",
-                        ret);
+               SLOT_WARN(php_slot, "Error %d populating changeset\n",
+                         ret);
                goto free_dt;
        }
 
        php_slot->dn->child = NULL;
        ret = of_changeset_apply(&php_slot->ocs);
        if (ret) {
-               pci_warn(php_slot->pdev, "Error %d applying changeset\n", ret);
+               SLOT_WARN(php_slot, "Error %d applying changeset\n", ret);
                goto destroy_changeset;
        }
 
@@ -342,18 +339,19 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
        ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
        if (ret > 0) {
                if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
-                   be64_to_cpu(msg.params[2]) != state                 ||
-                   be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
-                       pci_warn(php_slot->pdev, "Wrong msg (%lld, %lld, %lld)\n",
-                                be64_to_cpu(msg.params[1]),
-                                be64_to_cpu(msg.params[2]),
-                                be64_to_cpu(msg.params[3]));
+                   be64_to_cpu(msg.params[2]) != state) {
+                       SLOT_WARN(php_slot, "Wrong msg (%lld, %lld, %lld)\n",
+                                 be64_to_cpu(msg.params[1]),
+                                 be64_to_cpu(msg.params[2]),
+                                 be64_to_cpu(msg.params[3]));
                        return -ENOMSG;
                }
+               if (be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
+                       ret = -ENODEV;
+                       goto error;
+               }
        } else if (ret < 0) {
-               pci_warn(php_slot->pdev, "Error %d powering %s\n",
-                        ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
-               return ret;
+               goto error;
        }
 
        if (state == OPAL_PCI_SLOT_POWER_OFF || state == OPAL_PCI_SLOT_OFFLINE)
@@ -362,6 +360,11 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
                ret = pnv_php_add_devtree(php_slot);
 
        return ret;
+
+error:
+       SLOT_WARN(php_slot, "Error %d powering %s\n",
+                 ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
+       return ret;
 }
 EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
 
@@ -378,8 +381,8 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
         */
        ret = pnv_pci_get_power_state(php_slot->id, &power_state);
        if (ret) {
-               pci_warn(php_slot->pdev, "Error %d getting power status\n",
-                        ret);
+               SLOT_WARN(php_slot, "Error %d getting power status\n",
+                         ret);
        } else {
                *state = power_state;
        }
@@ -402,7 +405,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
                *state = presence;
                ret = 0;
        } else {
-               pci_warn(php_slot->pdev, "Error %d getting presence\n", ret);
+               SLOT_WARN(php_slot, "Error %d getting presence\n", ret);
        }
 
        return ret;
@@ -566,7 +569,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
        struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
        int ret;
 
-       if (php_slot->state != PNV_PHP_STATE_POPULATED)
+       /*
+        * Allow disabling a slot that is still in the registered state, to
+        * cover cases where the slot couldn't be enabled and so never
+        * reached the populated state.
+        */
+       if (php_slot->state != PNV_PHP_STATE_POPULATED &&
+           php_slot->state != PNV_PHP_STATE_REGISTERED)
                return 0;
 
        /* Remove all devices behind the slot */
@@ -675,7 +684,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
        ret = pci_hp_register(&php_slot->slot, php_slot->bus,
                              php_slot->slot_no, php_slot->name);
        if (ret) {
-               pci_warn(php_slot->pdev, "Error %d registering slot\n", ret);
+               SLOT_WARN(php_slot, "Error %d registering slot\n", ret);
                return ret;
        }
 
@@ -728,7 +737,7 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
        /* Enable MSIx */
        ret = pci_enable_msix_exact(pdev, &entry, 1);
        if (ret) {
-               pci_warn(pdev, "Error %d enabling MSIx\n", ret);
+               SLOT_WARN(php_slot, "Error %d enabling MSIx\n", ret);
                return ret;
        }
 
@@ -778,8 +787,9 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
                   (sts & PCI_EXP_SLTSTA_PDC)) {
                ret = pnv_pci_get_presence_state(php_slot->id, &presence);
                if (ret) {
-                       pci_warn(pdev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
-                                php_slot->name, ret, sts);
+                       SLOT_WARN(php_slot,
+                                 "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
+                                 php_slot->name, ret, sts);
                        return IRQ_HANDLED;
                }
 
@@ -809,8 +819,9 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
         */
        event = kzalloc(sizeof(*event), GFP_ATOMIC);
        if (!event) {
-               pci_warn(pdev, "PCI slot [%s] missed hotplug event 0x%04x\n",
-                        php_slot->name, sts);
+               SLOT_WARN(php_slot,
+                         "PCI slot [%s] missed hotplug event 0x%04x\n",
+                         php_slot->name, sts);
                return IRQ_HANDLED;
        }
 
@@ -834,7 +845,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
        /* Allocate workqueue */
        php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
        if (!php_slot->wq) {
-               pci_warn(pdev, "Cannot alloc workqueue\n");
+               SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
                pnv_php_disable_irq(php_slot, true);
                return;
        }
@@ -858,7 +869,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
                          php_slot->name, php_slot);
        if (ret) {
                pnv_php_disable_irq(php_slot, true);
-               pci_warn(pdev, "Error %d enabling IRQ %d\n", ret, irq);
+               SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
                return;
        }
 
@@ -894,7 +905,7 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
 
        ret = pci_enable_device(pdev);
        if (ret) {
-               pci_warn(pdev, "Error %d enabling device\n", ret);
+               SLOT_WARN(php_slot, "Error %d enabling device\n", ret);
                return;
        }
 
@@ -1009,6 +1020,8 @@ static int __init pnv_php_init(void)
        for_each_compatible_node(dn, NULL, "ibm,ioda3-phb")
                pnv_php_register(dn);
 
+       for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb")
+               pnv_php_register_one(dn); /* slot directly under the PHB */
        return 0;
 }
 
@@ -1021,6 +1034,9 @@ static void __exit pnv_php_exit(void)
 
        for_each_compatible_node(dn, NULL, "ibm,ioda3-phb")
                pnv_php_unregister(dn);
+
+       for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb")
+               pnv_php_unregister_one(dn); /* slot directly under the PHB */
 }
 
 module_init(pnv_php_init);
index 3c30e72..d828ca8 100644 (file)
@@ -131,6 +131,7 @@ bool pci_ats_disabled(void)
 {
        return pcie_ats_disabled;
 }
+EXPORT_SYMBOL_GPL(pci_ats_disabled);
 
 /* Disable bridge_d3 for all PCIe ports */
 static bool pci_bridge_d3_disable;
index 6ef74bf..bd2b691 100644 (file)
@@ -306,19 +306,20 @@ static int proc_bus_pci_release(struct inode *inode, struct file *file)
 }
 #endif /* HAVE_PCI_MMAP */
 
-static const struct file_operations proc_bus_pci_operations = {
-       .owner          = THIS_MODULE,
-       .llseek         = proc_bus_pci_lseek,
-       .read           = proc_bus_pci_read,
-       .write          = proc_bus_pci_write,
-       .unlocked_ioctl = proc_bus_pci_ioctl,
-       .compat_ioctl   = proc_bus_pci_ioctl,
+static const struct proc_ops proc_bus_pci_ops = {
+       .proc_lseek     = proc_bus_pci_lseek,
+       .proc_read      = proc_bus_pci_read,
+       .proc_write     = proc_bus_pci_write,
+       .proc_ioctl     = proc_bus_pci_ioctl,
+#ifdef CONFIG_COMPAT
+       .proc_compat_ioctl = proc_bus_pci_ioctl,
+#endif
 #ifdef HAVE_PCI_MMAP
-       .open           = proc_bus_pci_open,
-       .release        = proc_bus_pci_release,
-       .mmap           = proc_bus_pci_mmap,
+       .proc_open      = proc_bus_pci_open,
+       .proc_release   = proc_bus_pci_release,
+       .proc_mmap      = proc_bus_pci_mmap,
 #ifdef HAVE_ARCH_PCI_GET_UNMAPPED_AREA
-       .get_unmapped_area = get_pci_unmapped_area,
+       .proc_get_unmapped_area = get_pci_unmapped_area,
 #endif /* HAVE_ARCH_PCI_GET_UNMAPPED_AREA */
 #endif /* HAVE_PCI_MMAP */
 };
@@ -424,7 +425,7 @@ int pci_proc_attach_device(struct pci_dev *dev)
 
        sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
        e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
-                            &proc_bus_pci_operations, dev);
+                            &proc_bus_pci_ops, dev);
        if (!e)
                return -ENOMEM;
        proc_set_size(e, dev->cfg_size);
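
The led.c hunk earlier and this PCI /proc one are instances of the same file_operations-to-proc_ops conversion that repeats below (ISA PnP, PNPBIOS, thinkpad_acpi, toshiba_acpi): drop .owner, prefix every handler with proc_, and keep registering through proc_create_data(). A self-contained sketch of the pattern for a seq_file-backed entry; the example_* names are invented for illustration and are not part of any of these patches:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, PDE_DATA(inode));
}

/* struct proc_ops has no .owner field; that member simply goes away. */
static const struct proc_ops example_proc_ops = {
	.proc_open	= example_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init example_proc_init(void)
{
	return proc_create_data("example", 0444, NULL,
				&example_proc_ops, NULL) ? 0 : -ENOMEM;
}
module_init(example_proc_init);
MODULE_LICENSE("GPL");

The write and ioctl handlers map the same way (.proc_write, .proc_ioctl), with compat_ioctl moving under CONFIG_COMPAT as the proc_bus_pci_ops hunk above shows.
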
index 2eb28cc..cd5a6c9 100644 (file)
@@ -712,7 +712,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
 {
        struct phy *phy = phy_get(dev, string);
 
-       if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
+       if (PTR_ERR(phy) == -ENODEV)
                phy = NULL;
 
        return phy;
@@ -766,7 +766,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
 {
        struct phy *phy = devm_phy_get(dev, string);
 
-       if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
+       if (PTR_ERR(phy) == -ENODEV)
                phy = NULL;
 
        return phy;
index 21c370d..bddf2c5 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/module.h>
+#include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/pinmux.h>
index 8723bcf..4f3651f 100644 (file)
@@ -63,7 +63,7 @@ struct acpi_peripheral {
 struct chromeos_laptop {
        /*
         * Note that we can't mark this pointer as const because
-        * i2c_new_probed_device() changes passed in I2C board info, so.
+        * i2c_new_scanned_device() changes passed in I2C board info, so.
         */
        struct i2c_peripheral *i2c_peripherals;
        unsigned int num_i2c_peripherals;
@@ -87,8 +87,8 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
         * address we scan secondary addresses. In any case the client
         * structure gets assigned primary address.
         */
-       client = i2c_new_probed_device(adapter, info, addr_list, NULL);
-       if (!client && alt_addr) {
+       client = i2c_new_scanned_device(adapter, info, addr_list, NULL);
+       if (IS_ERR(client) && alt_addr) {
                struct i2c_board_info dummy_info = {
                        I2C_BOARD_INFO("dummy", info->addr),
                };
@@ -97,9 +97,9 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
                };
                struct i2c_client *dummy;
 
-               dummy = i2c_new_probed_device(adapter, &dummy_info,
-                                             alt_addr_list, NULL);
-               if (dummy) {
+               dummy = i2c_new_scanned_device(adapter, &dummy_info,
+                                              alt_addr_list, NULL);
+               if (!IS_ERR(dummy)) {
                        pr_debug("%d-%02x is probed at %02x\n",
                                 adapter->nr, info->addr, dummy->addr);
                        i2c_unregister_device(dummy);
@@ -107,12 +107,14 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
                }
        }
 
-       if (!client)
+       if (IS_ERR(client)) {
+               client = NULL;
                pr_debug("failed to register device %d-%02x\n",
                         adapter->nr, info->addr);
-       else
+       } else {
                pr_debug("added i2c device %d-%02x\n",
                         adapter->nr, info->addr);
+       }
 
        return client;
 }
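
The chromeos_laptop changes track an API difference rather than a plain rename: i2c_new_probed_device() signalled failure with NULL, while i2c_new_scanned_device() returns an ERR_PTR, so every NULL test becomes an IS_ERR() check. A hedged sketch of a caller that keeps the old "NULL means not found" contract; example_scan() and its arguments are illustrative only:

#include <linux/err.h>
#include <linux/i2c.h>

static struct i2c_client *example_scan(struct i2c_adapter *adap,
				       struct i2c_board_info *info,
				       const unsigned short *addr_list)
{
	struct i2c_client *client;

	client = i2c_new_scanned_device(adap, info, addr_list, NULL);
	if (IS_ERR(client))		/* was: if (!client) */
		return NULL;		/* preserve the legacy contract */

	return client;
}
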
index 6d6ce86..6fc8f2c 100644 (file)
@@ -16,7 +16,8 @@
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
 #include <linux/suspend.h>
-#include <asm/unaligned.h>
+
+#include "cros_ec.h"
 
 #define CROS_EC_DEV_EC_INDEX 0
 #define CROS_EC_DEV_PD_INDEX 1
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
new file mode 100644 (file)
index 0000000..e69fc1f
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ChromeOS Embedded Controller core interface.
+ *
+ * Copyright (C) 2020 Google LLC
+ */
+
+#ifndef __CROS_EC_H
+#define __CROS_EC_H
+
+int cros_ec_register(struct cros_ec_device *ec_dev);
+int cros_ec_unregister(struct cros_ec_device *ec_dev);
+
+int cros_ec_suspend(struct cros_ec_device *ec_dev);
+int cros_ec_resume(struct cros_ec_device *ec_dev);
+
+bool cros_ec_handle_event(struct cros_ec_device *ec_dev);
+
+#endif /* __CROS_EC_H */
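
This private header is what the cros_ec transport drivers below (the i2c, ISH, LPC, rpmsg and SPI files gaining #include "cros_ec.h") now use instead of <linux/mfd/cros_ec.h>. A hedged sketch of how a transport probe is expected to consume it; example_transport_probe(), example_xfer() and the fields filled in are illustrative and not taken from any of these drivers:

#include <linux/device.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>

#include "cros_ec.h"

/* Hypothetical bus transfer hook; a real transport talks to its bus here. */
static int example_xfer(struct cros_ec_device *ec_dev,
			struct cros_ec_command *msg)
{
	return -ENOSYS;
}

static int example_transport_probe(struct device *dev)
{
	struct cros_ec_device *ec_dev;

	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	ec_dev->dev = dev;
	ec_dev->cmd_xfer = example_xfer;

	/* Registration now comes from the private header, not mfd/cros_ec.h. */
	return cros_ec_register(ec_dev);
}
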
index 74ded44..c65e70b 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/fs.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/notifier.h>
index 6ae4849..ecfada0 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/platform_data/cros_ec_commands.h>
index 9bd97bc..6119ecc 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+#include "cros_ec.h"
+
 /**
  * Request format for protocol v3
  * byte 0      0xda (EC_COMMAND_PROTOCOL_3)
index e599682..93a71e9 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/platform_data/cros_ec_proto.h>
 #include <linux/intel-ish-client-if.h>
 
+#include "cros_ec.h"
+
 /*
  * ISH TX/RX ring buffer pool size
  *
@@ -76,7 +78,7 @@ struct cros_ish_in_msg {
  *
  * The writers are .reset() and .probe() function.
  */
-DECLARE_RWSEM(init_lock);
+static DECLARE_RWSEM(init_lock);
 
 /**
  * struct response_info - Encapsulate firmware response related
index c0f2eec..b4c110c 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/kobject.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
index dccf479..1f78619 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/printk.h>
 #include <linux/suspend.h>
 
+#include "cros_ec.h"
 #include "cros_ec_lpc_mec.h"
 
 #define DRV_NAME "cros_ec_lpcs"
@@ -396,7 +397,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
         * Some boards do not have an IRQ allotted for cros_ec_lpc,
         * which makes ENXIO an expected (and safe) scenario.
         */
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        if (irq > 0)
                ec_dev->irq = irq;
        else if (irq != -ENXIO) {
index da1b1c4..3cfa643 100644 (file)
@@ -54,8 +54,6 @@ static int send_command(struct cros_ec_device *ec_dev,
        int ret;
        int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
 
-       trace_cros_ec_cmd(msg);
-
        if (ec_dev->proto_version > 2)
                xfer_fxn = ec_dev->pkt_xfer;
        else
@@ -72,7 +70,9 @@ static int send_command(struct cros_ec_device *ec_dev,
                return -EIO;
        }
 
+       trace_cros_ec_request_start(msg);
        ret = (*xfer_fxn)(ec_dev, msg);
+       trace_cros_ec_request_done(msg, ret);
        if (msg->result == EC_RES_IN_PROGRESS) {
                int i;
                struct cros_ec_command *status_msg;
@@ -95,7 +95,9 @@ static int send_command(struct cros_ec_device *ec_dev,
                for (i = 0; i < EC_COMMAND_RETRIES; i++) {
                        usleep_range(10000, 11000);
 
+                       trace_cros_ec_request_start(status_msg);
                        ret = (*xfer_fxn)(ec_dev, status_msg);
+                       trace_cros_ec_request_done(status_msg, ret);
                        if (ret == -EAGAIN)
                                continue;
                        if (ret < 0)
index bd068af..dbc3f55 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/rpmsg.h>
 #include <linux/slab.h>
 
+#include "cros_ec.h"
+
 #define EC_MSG_TIMEOUT_MS      200
 #define HOST_COMMAND_MARK      1
 #define HOST_EVENT_MARK                2
index 04d8879..79fefd3 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/module.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
 #include <linux/platform_data/cros_ec_sensorhub.h>
index a831bd5..46786d2 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/spi/spi.h>
 #include <uapi/linux/sched/types.h>
 
+#include "cros_ec.h"
+
 /* The header byte, which follows the preamble */
 #define EC_MSG_HEADER                  0xec
 
index 74d36b8..07dac97 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/kobject.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
index 5af1d66..523a39b 100644 (file)
@@ -8,6 +8,11 @@
 // Generate the list using the following script:
 // sed -n 's/^#define \(EC_CMD_[[:alnum:]_]*\)\s.*/\tTRACE_SYMBOL(\1), \\/p' include/linux/platform_data/cros_ec_commands.h
 #define EC_CMDS \
+       TRACE_SYMBOL(EC_CMD_ACPI_READ), \
+       TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
+       TRACE_SYMBOL(EC_CMD_ACPI_BURST_ENABLE), \
+       TRACE_SYMBOL(EC_CMD_ACPI_BURST_DISABLE), \
+       TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
        TRACE_SYMBOL(EC_CMD_PROTO_VERSION), \
        TRACE_SYMBOL(EC_CMD_HELLO), \
        TRACE_SYMBOL(EC_CMD_GET_VERSION), \
@@ -22,6 +27,8 @@
        TRACE_SYMBOL(EC_CMD_GET_PROTOCOL_INFO), \
        TRACE_SYMBOL(EC_CMD_GSV_PAUSE_IN_S5), \
        TRACE_SYMBOL(EC_CMD_GET_FEATURES), \
+       TRACE_SYMBOL(EC_CMD_GET_SKU_ID), \
+       TRACE_SYMBOL(EC_CMD_SET_SKU_ID), \
        TRACE_SYMBOL(EC_CMD_FLASH_INFO), \
        TRACE_SYMBOL(EC_CMD_FLASH_READ), \
        TRACE_SYMBOL(EC_CMD_FLASH_WRITE), \
@@ -29,6 +36,8 @@
        TRACE_SYMBOL(EC_CMD_FLASH_PROTECT), \
        TRACE_SYMBOL(EC_CMD_FLASH_REGION_INFO), \
        TRACE_SYMBOL(EC_CMD_VBNV_CONTEXT), \
+       TRACE_SYMBOL(EC_CMD_FLASH_SPI_INFO), \
+       TRACE_SYMBOL(EC_CMD_FLASH_SELECT), \
        TRACE_SYMBOL(EC_CMD_PWM_GET_FAN_TARGET_RPM), \
        TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_TARGET_RPM), \
        TRACE_SYMBOL(EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT), \
@@ -40,6 +49,8 @@
        TRACE_SYMBOL(EC_CMD_LED_CONTROL), \
        TRACE_SYMBOL(EC_CMD_VBOOT_HASH), \
        TRACE_SYMBOL(EC_CMD_MOTION_SENSE_CMD), \
+       TRACE_SYMBOL(EC_CMD_FORCE_LID_OPEN), \
+       TRACE_SYMBOL(EC_CMD_CONFIG_POWER_BUTTON), \
        TRACE_SYMBOL(EC_CMD_USB_CHARGE_SET_MODE), \
        TRACE_SYMBOL(EC_CMD_PSTORE_INFO), \
        TRACE_SYMBOL(EC_CMD_PSTORE_READ), \
@@ -50,6 +61,9 @@
        TRACE_SYMBOL(EC_CMD_RTC_SET_ALARM), \
        TRACE_SYMBOL(EC_CMD_PORT80_LAST_BOOT), \
        TRACE_SYMBOL(EC_CMD_PORT80_READ), \
+       TRACE_SYMBOL(EC_CMD_VSTORE_INFO), \
+       TRACE_SYMBOL(EC_CMD_VSTORE_READ), \
+       TRACE_SYMBOL(EC_CMD_VSTORE_WRITE), \
        TRACE_SYMBOL(EC_CMD_THERMAL_SET_THRESHOLD), \
        TRACE_SYMBOL(EC_CMD_THERMAL_GET_THRESHOLD), \
        TRACE_SYMBOL(EC_CMD_THERMAL_AUTO_FAN_CTRL), \
        TRACE_SYMBOL(EC_CMD_MKBP_STATE), \
        TRACE_SYMBOL(EC_CMD_MKBP_INFO), \
        TRACE_SYMBOL(EC_CMD_MKBP_SIMULATE_KEY), \
+       TRACE_SYMBOL(EC_CMD_GET_KEYBOARD_ID), \
        TRACE_SYMBOL(EC_CMD_MKBP_SET_CONFIG), \
        TRACE_SYMBOL(EC_CMD_MKBP_GET_CONFIG), \
        TRACE_SYMBOL(EC_CMD_KEYSCAN_SEQ_CTRL), \
        TRACE_SYMBOL(EC_CMD_GET_NEXT_EVENT), \
+       TRACE_SYMBOL(EC_CMD_KEYBOARD_FACTORY_TEST), \
        TRACE_SYMBOL(EC_CMD_TEMP_SENSOR_GET_INFO), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_B), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SMI_MASK), \
@@ -73,6 +89,7 @@
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_WAKE_MASK), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR_B), \
+       TRACE_SYMBOL(EC_CMD_HOST_EVENT), \
        TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_BKLIGHT), \
        TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_WIRELESS), \
        TRACE_SYMBOL(EC_CMD_GPIO_SET), \
        TRACE_SYMBOL(EC_CMD_CHARGE_STATE), \
        TRACE_SYMBOL(EC_CMD_CHARGE_CURRENT_LIMIT), \
        TRACE_SYMBOL(EC_CMD_EXTERNAL_POWER_LIMIT), \
+       TRACE_SYMBOL(EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT), \
+       TRACE_SYMBOL(EC_CMD_HIBERNATION_DELAY), \
        TRACE_SYMBOL(EC_CMD_HOST_SLEEP_EVENT), \
+       TRACE_SYMBOL(EC_CMD_DEVICE_EVENT), \
        TRACE_SYMBOL(EC_CMD_SB_READ_WORD), \
        TRACE_SYMBOL(EC_CMD_SB_WRITE_WORD), \
        TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
        TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
        TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
+       TRACE_SYMBOL(EC_CMD_SB_FW_UPDATE), \
+       TRACE_SYMBOL(EC_CMD_ENTERING_MODE), \
+       TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU_PROTECT), \
+       TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+       TRACE_SYMBOL(EC_CMD_CEC_SET), \
+       TRACE_SYMBOL(EC_CMD_CEC_GET), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
        TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
        TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
-       TRACE_SYMBOL(EC_CMD_ACPI_READ), \
-       TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
-       TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
-       TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
-       TRACE_SYMBOL(EC_CMD_CEC_SET), \
-       TRACE_SYMBOL(EC_CMD_CEC_GET), \
        TRACE_SYMBOL(EC_CMD_REBOOT), \
        TRACE_SYMBOL(EC_CMD_RESEND_RESPONSE), \
        TRACE_SYMBOL(EC_CMD_VERSION0), \
        TRACE_SYMBOL(EC_CMD_PD_EXCHANGE_STATUS), \
+       TRACE_SYMBOL(EC_CMD_PD_HOST_EVENT_STATUS), \
        TRACE_SYMBOL(EC_CMD_USB_PD_CONTROL), \
        TRACE_SYMBOL(EC_CMD_USB_PD_PORTS), \
        TRACE_SYMBOL(EC_CMD_USB_PD_POWER_INFO), \
        TRACE_SYMBOL(EC_CMD_CHARGE_PORT_COUNT), \
+       TRACE_SYMBOL(EC_CMD_USB_PD_FW_UPDATE), \
+       TRACE_SYMBOL(EC_CMD_USB_PD_RW_HASH_ENTRY), \
+       TRACE_SYMBOL(EC_CMD_USB_PD_DEV_INFO), \
        TRACE_SYMBOL(EC_CMD_USB_PD_DISCOVERY), \
        TRACE_SYMBOL(EC_CMD_PD_CHARGE_PORT_OVERRIDE), \
        TRACE_SYMBOL(EC_CMD_PD_GET_LOG_ENTRY), \
-       TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO)
+       TRACE_SYMBOL(EC_CMD_USB_PD_GET_AMODE), \
+       TRACE_SYMBOL(EC_CMD_USB_PD_SET_AMODE), \
+       TRACE_SYMBOL(EC_CMD_PD_WRITE_LOG_ENTRY), \
+       TRACE_SYMBOL(EC_CMD_PD_CONTROL), \
+       TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO), \
+       TRACE_SYMBOL(EC_CMD_PD_CHIP_INFO), \
+       TRACE_SYMBOL(EC_CMD_RWSIG_CHECK_STATUS), \
+       TRACE_SYMBOL(EC_CMD_RWSIG_ACTION), \
+       TRACE_SYMBOL(EC_CMD_EFS_VERIFY), \
+       TRACE_SYMBOL(EC_CMD_GET_CROS_BOARD_INFO), \
+       TRACE_SYMBOL(EC_CMD_SET_CROS_BOARD_INFO), \
+       TRACE_SYMBOL(EC_CMD_GET_UPTIME_INFO), \
+       TRACE_SYMBOL(EC_CMD_ADD_ENTROPY), \
+       TRACE_SYMBOL(EC_CMD_ADC_READ), \
+       TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
+       TRACE_SYMBOL(EC_CMD_AP_RESET), \
+       TRACE_SYMBOL(EC_CMD_CR51_BASE), \
+       TRACE_SYMBOL(EC_CMD_CR51_LAST), \
+       TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
+       TRACE_SYMBOL(EC_CMD_FP_MODE), \
+       TRACE_SYMBOL(EC_CMD_FP_INFO), \
+       TRACE_SYMBOL(EC_CMD_FP_FRAME), \
+       TRACE_SYMBOL(EC_CMD_FP_TEMPLATE), \
+       TRACE_SYMBOL(EC_CMD_FP_CONTEXT), \
+       TRACE_SYMBOL(EC_CMD_FP_STATS), \
+       TRACE_SYMBOL(EC_CMD_FP_SEED), \
+       TRACE_SYMBOL(EC_CMD_FP_ENC_STATUS), \
+       TRACE_SYMBOL(EC_CMD_TP_SELF_TEST), \
+       TRACE_SYMBOL(EC_CMD_TP_FRAME_INFO), \
+       TRACE_SYMBOL(EC_CMD_TP_FRAME_SNAPSHOT), \
+       TRACE_SYMBOL(EC_CMD_TP_FRAME_GET), \
+       TRACE_SYMBOL(EC_CMD_BATTERY_GET_STATIC), \
+       TRACE_SYMBOL(EC_CMD_BATTERY_GET_DYNAMIC), \
+       TRACE_SYMBOL(EC_CMD_CHARGER_CONTROL), \
+       TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_BASE), \
+       TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_LAST)
+
+/* See the enum ec_status in include/linux/platform_data/cros_ec_commands.h */
+#define EC_RESULT \
+       TRACE_SYMBOL(EC_RES_SUCCESS), \
+       TRACE_SYMBOL(EC_RES_INVALID_COMMAND), \
+       TRACE_SYMBOL(EC_RES_ERROR), \
+       TRACE_SYMBOL(EC_RES_INVALID_PARAM), \
+       TRACE_SYMBOL(EC_RES_ACCESS_DENIED), \
+       TRACE_SYMBOL(EC_RES_INVALID_RESPONSE), \
+       TRACE_SYMBOL(EC_RES_INVALID_VERSION), \
+       TRACE_SYMBOL(EC_RES_INVALID_CHECKSUM), \
+       TRACE_SYMBOL(EC_RES_IN_PROGRESS), \
+       TRACE_SYMBOL(EC_RES_UNAVAILABLE), \
+       TRACE_SYMBOL(EC_RES_TIMEOUT), \
+       TRACE_SYMBOL(EC_RES_OVERFLOW), \
+       TRACE_SYMBOL(EC_RES_INVALID_HEADER), \
+       TRACE_SYMBOL(EC_RES_REQUEST_TRUNCATED), \
+       TRACE_SYMBOL(EC_RES_RESPONSE_TOO_BIG), \
+       TRACE_SYMBOL(EC_RES_BUS_ERROR), \
+       TRACE_SYMBOL(EC_RES_BUSY), \
+       TRACE_SYMBOL(EC_RES_INVALID_HEADER_VERSION), \
+       TRACE_SYMBOL(EC_RES_INVALID_HEADER_CRC), \
+       TRACE_SYMBOL(EC_RES_INVALID_DATA_CRC), \
+       TRACE_SYMBOL(EC_RES_DUP_UNAVAILABLE)
 
 #define CREATE_TRACE_POINTS
 #include "cros_ec_trace.h"
index 0dd4df3..e9fb05f 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <linux/tracepoint.h>
 
-DECLARE_EVENT_CLASS(cros_ec_cmd_class,
+TRACE_EVENT(cros_ec_request_start,
        TP_PROTO(struct cros_ec_command *cmd),
        TP_ARGS(cmd),
        TP_STRUCT__entry(
@@ -33,10 +33,26 @@ DECLARE_EVENT_CLASS(cros_ec_cmd_class,
                  __print_symbolic(__entry->command, EC_CMDS))
 );
 
-
-DEFINE_EVENT(cros_ec_cmd_class, cros_ec_cmd,
-       TP_PROTO(struct cros_ec_command *cmd),
-       TP_ARGS(cmd)
+TRACE_EVENT(cros_ec_request_done,
+       TP_PROTO(struct cros_ec_command *cmd, int retval),
+       TP_ARGS(cmd, retval),
+       TP_STRUCT__entry(
+               __field(uint32_t, version)
+               __field(uint32_t, command)
+               __field(uint32_t, result)
+               __field(int, retval)
+       ),
+       TP_fast_assign(
+               __entry->version = cmd->version;
+               __entry->command = cmd->command;
+               __entry->result = cmd->result;
+               __entry->retval = retval;
+       ),
+       TP_printk("version: %u, command: %s, ec result: %s, retval: %d",
+                 __entry->version,
+                 __print_symbolic(__entry->command, EC_CMDS),
+                 __print_symbolic(__entry->result, EC_RESULT),
+                 __entry->retval)
 );
 
 
index f11a128..8edae46 100644 (file)
@@ -6,7 +6,6 @@
 
 #include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
index 374cdd1..7de3ea7 100644 (file)
@@ -6,7 +6,6 @@
  */
 
 #include <linux/ktime.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
index 365f30e..49e8530 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config WILCO_EC
        tristate "ChromeOS Wilco Embedded Controller"
-       depends on ACPI && X86 && CROS_EC_LPC && LEDS_CLASS
+       depends on X86 || COMPILE_TEST
+       depends on ACPI && CROS_EC_LPC && LEDS_CLASS
        help
          If you say Y here, you get support for talking to the ChromeOS
          Wilco EC over an eSPI bus. This uses a simple byte-level protocol
index 5210c35..5b42992 100644 (file)
@@ -94,7 +94,7 @@ static int wilco_ec_probe(struct platform_device *pdev)
 
        ret = wilco_ec_add_sysfs(ec);
        if (ret < 0) {
-               dev_err(dev, "Failed to create sysfs entries: %d", ret);
+               dev_err(dev, "Failed to create sysfs entries: %d\n", ret);
                goto unregister_rtc;
        }
 
@@ -137,9 +137,9 @@ static int wilco_ec_remove(struct platform_device *pdev)
 {
        struct wilco_ec_device *ec = platform_get_drvdata(pdev);
 
+       platform_device_unregister(ec->telem_pdev);
        platform_device_unregister(ec->charger_pdev);
        wilco_ec_remove_sysfs(ec);
-       platform_device_unregister(ec->telem_pdev);
        platform_device_unregister(ec->rtc_pdev);
        if (ec->debugfs_pdev)
                platform_device_unregister(ec->debugfs_pdev);
index 5731d1b..6ce9c67 100644 (file)
@@ -69,7 +69,7 @@ static int send_kbbl_msg(struct wilco_ec_device *ec,
        ret = wilco_ec_mailbox(ec, &msg);
        if (ret < 0) {
                dev_err(ec->dev,
-                       "Failed sending keyboard LEDs command: %d", ret);
+                       "Failed sending keyboard LEDs command: %d\n", ret);
                return ret;
        }
 
@@ -94,7 +94,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
 
        if (response.status) {
                dev_err(ec->dev,
-                       "EC reported failure sending keyboard LEDs command: %d",
+                       "EC reported failure sending keyboard LEDs command: %d\n",
                        response.status);
                return -EIO;
        }
@@ -147,7 +147,7 @@ static int kbbl_init(struct wilco_ec_device *ec)
 
        if (response.status) {
                dev_err(ec->dev,
-                       "EC reported failure sending keyboard LEDs command: %d",
+                       "EC reported failure sending keyboard LEDs command: %d\n",
                        response.status);
                return -EIO;
        }
@@ -179,7 +179,7 @@ int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
        ret = kbbl_exist(ec, &leds_exist);
        if (ret < 0) {
                dev_err(ec->dev,
-                       "Failed checking keyboard LEDs support: %d", ret);
+                       "Failed checking keyboard LEDs support: %d\n", ret);
                return ret;
        }
        if (!leds_exist)
index ced1f9f..0f98358 100644 (file)
@@ -163,13 +163,13 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
        }
 
        if (rs->data_size != EC_MAILBOX_DATA_SIZE) {
-               dev_dbg(ec->dev, "unexpected packet size (%u != %u)",
+               dev_dbg(ec->dev, "unexpected packet size (%u != %u)\n",
                        rs->data_size, EC_MAILBOX_DATA_SIZE);
                return -EMSGSIZE;
        }
 
        if (rs->data_size < msg->response_size) {
-               dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)",
+               dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)\n",
                        rs->data_size, msg->response_size);
                return -EMSGSIZE;
        }
index 1176d54..e06d96f 100644 (file)
@@ -367,7 +367,7 @@ static int telem_device_probe(struct platform_device *pdev)
        minor = ida_alloc_max(&telem_ida, TELEM_MAX_DEV-1, GFP_KERNEL);
        if (minor < 0) {
                error = minor;
-               dev_err(&pdev->dev, "Failed to find minor number: %d", error);
+               dev_err(&pdev->dev, "Failed to find minor number: %d\n", error);
                return error;
        }
 
@@ -427,14 +427,14 @@ static int __init telem_module_init(void)
 
        ret = class_register(&telem_class);
        if (ret) {
-               pr_err(DRV_NAME ": Failed registering class: %d", ret);
+               pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
                return ret;
        }
 
        /* Request the kernel for device numbers, starting with minor=0 */
        ret = alloc_chrdev_region(&dev_num, 0, TELEM_MAX_DEV, TELEM_DEV_NAME);
        if (ret) {
-               pr_err(DRV_NAME ": Failed allocating dev numbers: %d", ret);
+               pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
                goto destroy_class;
        }
        telem_major = MAJOR(dev_num);
index da794dc..8eaadba 100644 (file)
@@ -907,13 +907,12 @@ static ssize_t dispatch_proc_write(struct file *file,
        return ret;
 }
 
-static const struct file_operations dispatch_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = dispatch_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = dispatch_proc_write,
+static const struct proc_ops dispatch_proc_ops = {
+       .proc_open      = dispatch_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = dispatch_proc_write,
 };
 
 static char *next_cmd(char **cmds)
@@ -9984,7 +9983,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
                if (ibm->write)
                        mode |= S_IWUSR;
                entry = proc_create_data(ibm->name, mode, proc_dir,
-                                        &dispatch_proc_fops, ibm);
+                                        &dispatch_proc_ops, ibm);
                if (!entry) {
                        pr_err("unable to create proc entry %s\n", ibm->name);
                        ret = -ENODEV;
index a1e6569..8089445 100644 (file)
@@ -1432,13 +1432,12 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations lcd_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = lcd_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = lcd_proc_write,
+static const struct proc_ops lcd_proc_ops = {
+       .proc_open      = lcd_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = lcd_proc_write,
 };
 
 /* Video-Out */
@@ -1539,13 +1538,12 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
        return ret ? -EIO : count;
 }
 
-static const struct file_operations video_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = video_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = video_proc_write,
+static const struct proc_ops video_proc_ops = {
+       .proc_open      = video_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = video_proc_write,
 };
 
 /* Fan status */
@@ -1617,13 +1615,12 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations fan_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = fan_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = fan_proc_write,
+static const struct proc_ops fan_proc_ops = {
+       .proc_open      = fan_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = fan_proc_write,
 };
 
 static int keys_proc_show(struct seq_file *m, void *v)
@@ -1662,13 +1659,12 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations keys_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = keys_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = keys_proc_write,
+static const struct proc_ops keys_proc_ops = {
+       .proc_open      = keys_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = keys_proc_write,
 };
 
 static int __maybe_unused version_proc_show(struct seq_file *m, void *v)
@@ -1688,16 +1684,16 @@ static void create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
 {
        if (dev->backlight_dev)
                proc_create_data("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-                                &lcd_proc_fops, dev);
+                                &lcd_proc_ops, dev);
        if (dev->video_supported)
                proc_create_data("video", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-                                &video_proc_fops, dev);
+                                &video_proc_ops, dev);
        if (dev->fan_supported)
                proc_create_data("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-                                &fan_proc_fops, dev);
+                                &fan_proc_ops, dev);
        if (dev->hotkey_dev)
                proc_create_data("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-                                &keys_proc_fops, dev);
+                                &keys_proc_ops, dev);
        proc_create_single_data("version", S_IRUGO, toshiba_proc_dir,
                        version_proc_show, dev);
 }
index 3682097..785a796 100644 (file)
@@ -49,10 +49,9 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
        return nbytes;
 }
 
-static const struct file_operations isapnp_proc_bus_file_operations = {
-       .owner  = THIS_MODULE,
-       .llseek = isapnp_proc_bus_lseek,
-       .read = isapnp_proc_bus_read,
+static const struct proc_ops isapnp_proc_bus_proc_ops = {
+       .proc_lseek     = isapnp_proc_bus_lseek,
+       .proc_read      = isapnp_proc_bus_read,
 };
 
 static int isapnp_proc_attach_device(struct pnp_dev *dev)
@@ -69,7 +68,7 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev)
        }
        sprintf(name, "%02x", dev->number);
        e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
-                       &isapnp_proc_bus_file_operations, dev);
+                                           &isapnp_proc_bus_proc_ops, dev);
        if (!e)
                return -ENOMEM;
        proc_set_size(e, 256);
index fe1c8f5..a806830 100644 (file)
@@ -210,13 +210,12 @@ out:
        return ret;
 }
 
-static const struct file_operations pnpbios_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = pnpbios_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = pnpbios_proc_write,
+static const struct proc_ops pnpbios_proc_ops = {
+       .proc_open      = pnpbios_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = pnpbios_proc_write,
 };
 
 int pnpbios_interface_attach_device(struct pnp_bios_node *node)
@@ -228,13 +227,13 @@ int pnpbios_interface_attach_device(struct pnp_bios_node *node)
        if (!proc_pnp)
                return -EIO;
        if (!pnpbios_dont_use_current_config) {
-               proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
+               proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_ops,
                                 (void *)(long)(node->handle));
        }
 
        if (!proc_pnp_boot)
                return -EIO;
-       if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
+       if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_ops,
                             (void *)(long)(node->handle + 0x100)))
                return 0;
        return -EIO;
index ffad9ee..30c3d37 100644 (file)
@@ -5,7 +5,6 @@
  * Copyright (c) 2014 - 2018 Google, Inc
  */
 
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
index bd21655..30190be 100644 (file)
@@ -100,7 +100,7 @@ config PWM_BCM_KONA
 
 config PWM_BCM2835
        tristate "BCM2835 PWM support"
-       depends on ARCH_BCM2835
+       depends on ARCH_BCM2835 || ARCH_BRCMSTB
        help
          PWM framework driver for BCM2835 controller (Raspberry Pi)
 
@@ -328,7 +328,8 @@ config PWM_MXS
 
 config PWM_OMAP_DMTIMER
        tristate "OMAP Dual-Mode Timer PWM support"
-       depends on OF && ARCH_OMAP && OMAP_DM_TIMER
+       depends on OF
+       depends on OMAP_DM_TIMER || COMPILE_TEST
        help
          Generic PWM framework driver for OMAP Dual-Mode Timer PWM output
 
@@ -490,7 +491,7 @@ config PWM_TEGRA
          To compile this driver as a module, choose M here: the module
          will be called pwm-tegra.
 
-config  PWM_TIECAP
+config PWM_TIECAP
        tristate "ECAP PWM support"
        depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3
        help
@@ -499,7 +500,7 @@ config  PWM_TIECAP
          To compile this driver as a module, choose M here: the module
          will be called pwm-tiecap.
 
-config  PWM_TIEHRPWM
+config PWM_TIEHRPWM
        tristate "EHRPWM PWM support"
        depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3
        help
index f877e77..5a7f659 100644 (file)
@@ -20,6 +20,9 @@
 
 #include <dt-bindings/pwm/pwm.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/pwm.h>
+
 #define MAX_PWMS 1024
 
 static DEFINE_MUTEX(pwm_lookup_lock);
@@ -114,6 +117,11 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
                }
        }
 
+       if (pwm->chip->ops->get_state) {
+               pwm->chip->ops->get_state(pwm->chip, pwm, &pwm->state);
+               trace_pwm_get(pwm, &pwm->state);
+       }
+
        set_bit(PWMF_REQUESTED, &pwm->flags);
        pwm->label = label;
 
@@ -283,9 +291,6 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
                pwm->hwpwm = i;
                pwm->state.polarity = polarity;
 
-               if (chip->ops->get_state)
-                       chip->ops->get_state(chip, pwm, &pwm->state);
-
                radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
        }
 
@@ -472,6 +477,8 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
                if (err)
                        return err;
 
+               trace_pwm_apply(pwm, state);
+
                pwm->state = *state;
        } else {
                /*
index 9ba7334..6161e7e 100644 (file)
@@ -4,6 +4,19 @@
  *
  * Copyright (C) 2013 Atmel Corporation
  *              Bo Shen <voice.shen@atmel.com>
+ *
+ * Links to reference manuals for the supported PWM chips can be found in
+ * Documentation/arm/microchip.rst.
+ *
+ * Limitations:
+ * - Periods start with the inactive level.
+ * - Hardware has to be stopped in general to update settings.
+ *
+ * Software bugs/possible improvements:
+ * - When atmel_pwm_apply() is called with state->enabled=false a change in
+ *   state->polarity isn't honored.
+ * - Instead of sleeping to wait for a completed period, the interrupt
+ *   functionality could be used.
  */
 
 #include <linux/clk.h>
@@ -47,6 +60,8 @@
 #define PWMV2_CPRD             0x0C
 #define PWMV2_CPRDUPD          0x10
 
+#define PWM_MAX_PRES           10
+
 struct atmel_pwm_registers {
        u8 period;
        u8 period_upd;
@@ -55,8 +70,7 @@ struct atmel_pwm_registers {
 };
 
 struct atmel_pwm_config {
-       u32 max_period;
-       u32 max_pres;
+       u32 period_bits;
 };
 
 struct atmel_pwm_data {
@@ -97,7 +111,7 @@ static inline u32 atmel_pwm_ch_readl(struct atmel_pwm_chip *chip,
 {
        unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
 
-       return readl_relaxed(chip->base + base + offset);
+       return atmel_pwm_readl(chip, base + offset);
 }
 
 static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
@@ -106,7 +120,7 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
 {
        unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
 
-       writel_relaxed(val, chip->base + base + offset);
+       atmel_pwm_writel(chip, base + offset, val);
 }
 
 static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
@@ -115,17 +129,27 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
 {
        struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
        unsigned long long cycles = state->period;
+       int shift;
 
        /* Calculate the period cycles and prescale value */
        cycles *= clk_get_rate(atmel_pwm->clk);
        do_div(cycles, NSEC_PER_SEC);
 
-       for (*pres = 0; cycles > atmel_pwm->data->cfg.max_period; cycles >>= 1)
-               (*pres)++;
+       /*
+        * The register for the period length is cfg.period_bits bits wide.
+        * If the computed number of cycles needs more bits than that, divide
+        * the input clock frequency by two for each extra bit using pres and
+        * shift cprd accordingly.
+        */
+       shift = fls(cycles) - atmel_pwm->data->cfg.period_bits;
 
-       if (*pres > atmel_pwm->data->cfg.max_pres) {
+       if (shift > PWM_MAX_PRES) {
                dev_err(chip->dev, "pres exceeds the maximum value\n");
                return -EINVAL;
+       } else if (shift > 0) {
+               *pres = shift;
+               cycles >>= *pres;
+       } else {
+               *pres = 0;
        }
 
        *cprd = cycles;
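
To make the calculation above concrete: with period_bits = 16 and cycles = 0x50000 (19 significant bits), shift = 19 - 16 = 3, so pres becomes 3 and cprd becomes 0x50000 >> 3 = 0xa000. The following is a hypothetical standalone sketch of the same logic, not part of the patch; fls() is open-coded so the snippet is self-contained, and the constant 10 stands in for PWM_MAX_PRES from the patch.

static int example_cprd_and_pres(unsigned long long cycles, int period_bits,
				 unsigned int *cprd, unsigned int *pres)
{
	unsigned long long tmp = cycles;
	int bits = 0;

	/* open-coded fls(): number of significant bits in cycles */
	while (tmp) {
		bits++;
		tmp >>= 1;
	}

	/* each prescaler step halves the clock, so cprd shrinks by one bit */
	*pres = bits > period_bits ? bits - period_bits : 0;
	if (*pres > 10)		/* PWM_MAX_PRES: largest divider is 2^10 */
		return -1;	/* period too long even with the largest divider */

	*cprd = cycles >> *pres;
	return 0;
}
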
@@ -271,8 +295,48 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        return 0;
 }
 
+static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+                               struct pwm_state *state)
+{
+       struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+       u32 sr, cmr;
+
+       sr = atmel_pwm_readl(atmel_pwm, PWM_SR);
+       cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+
+       if (sr & (1 << pwm->hwpwm)) {
+               unsigned long rate = clk_get_rate(atmel_pwm->clk);
+               u32 cdty, cprd, pres;
+               u64 tmp;
+
+               pres = cmr & PWM_CMR_CPRE_MSK;
+
+               cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+                                         atmel_pwm->data->regs.period);
+               tmp = (u64)cprd * NSEC_PER_SEC;
+               tmp <<= pres;
+               state->period = DIV64_U64_ROUND_UP(tmp, rate);
+
+               cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+                                         atmel_pwm->data->regs.duty);
+               tmp = (u64)cdty * NSEC_PER_SEC;
+               tmp <<= pres;
+               state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
+
+               state->enabled = true;
+       } else {
+               state->enabled = false;
+       }
+
+       if (cmr & PWM_CMR_CPOL)
+               state->polarity = PWM_POLARITY_INVERSED;
+       else
+               state->polarity = PWM_POLARITY_NORMAL;
+}
+
 static const struct pwm_ops atmel_pwm_ops = {
        .apply = atmel_pwm_apply,
+       .get_state = atmel_pwm_get_state,
        .owner = THIS_MODULE,
 };
 
@@ -285,8 +349,7 @@ static const struct atmel_pwm_data atmel_sam9rl_pwm_data = {
        },
        .cfg = {
                /* 16 bits to keep period and duty. */
-               .max_period     = 0xffff,
-               .max_pres       = 10,
+               .period_bits    = 16,
        },
 };
 
@@ -299,8 +362,7 @@ static const struct atmel_pwm_data atmel_sama5_pwm_data = {
        },
        .cfg = {
                /* 16 bits to keep period and duty. */
-               .max_period     = 0xffff,
-               .max_pres       = 10,
+               .period_bits    = 16,
        },
 };
 
@@ -313,8 +375,7 @@ static const struct atmel_pwm_data mchp_sam9x60_pwm_data = {
        },
        .cfg = {
                /* 32 bits to keep period and duty. */
-               .max_period     = 0xffffffff,
-               .max_pres       = 10,
+               .period_bits    = 32,
        },
 };
 
index 8949744..09c08de 100644 (file)
@@ -25,11 +25,39 @@ struct cros_ec_pwm_device {
        struct pwm_chip chip;
 };
 
+/**
+ * struct cros_ec_pwm - per-PWM driver data
+ * @duty_cycle: cached duty cycle
+ */
+struct cros_ec_pwm {
+       u16 duty_cycle;
+};
+
 static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c)
 {
        return container_of(c, struct cros_ec_pwm_device, chip);
 }
 
+static int cros_ec_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct cros_ec_pwm *channel;
+
+       channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+       if (!channel)
+               return -ENOMEM;
+
+       pwm_set_chip_data(pwm, channel);
+
+       return 0;
+}
+
+static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+
+       kfree(channel);
+}
+
 static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
 {
        struct {
@@ -96,7 +124,9 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                             const struct pwm_state *state)
 {
        struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
-       int duty_cycle;
+       struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+       u16 duty_cycle;
+       int ret;
 
        /* The EC won't let us change the period */
        if (state->period != EC_PWM_MAX_DUTY)
@@ -108,13 +138,20 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         */
        duty_cycle = state->enabled ? state->duty_cycle : 0;
 
-       return cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+       ret = cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+       if (ret < 0)
+               return ret;
+
+       channel->duty_cycle = state->duty_cycle;
+
+       return 0;
 }
 
 static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
                                  struct pwm_state *state)
 {
        struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+       struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
        int ret;
 
        ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm);
@@ -126,8 +163,19 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
        state->enabled = (ret > 0);
        state->period = EC_PWM_MAX_DUTY;
 
-       /* Note that "disabled" and "duty cycle == 0" are treated the same */
-       state->duty_cycle = ret;
+       /*
+        * Note that "disabled" and "duty cycle == 0" are treated the same. If
+        * the cached duty cycle is not zero, use the cached duty cycle. This
+        * ensures that the configured duty cycle is kept across a disable and
+        * enable operation and avoids potentially confusing consumers.
+        *
+        * For the case of the initial hardware readout, channel->duty_cycle
+        * will be 0 and the actual duty cycle read from the EC is used.
+        */
+       if (ret == 0 && channel->duty_cycle > 0)
+               state->duty_cycle = channel->duty_cycle;
+       else
+               state->duty_cycle = ret;
 }
 
 static struct pwm_device *
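
Seen from a PWM consumer, the caching described in the comment above means that a duty cycle configured before disabling the channel is reported again afterwards instead of 0. The snippet below is a hypothetical consumer-side sketch, not part of the patch; the function name is made up, pwm_apply_state()/pwm_get_state() are the generic consumer API and EC_PWM_MAX_DUTY is the EC's fixed period (kernel headers assumed).

static void example_duty_survives_disable(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);
	state.period = EC_PWM_MAX_DUTY;		/* the EC only accepts this period */
	state.duty_cycle = EC_PWM_MAX_DUTY / 2;	/* 50% */
	state.enabled = true;
	pwm_apply_state(pwm, &state);

	state.enabled = false;
	pwm_apply_state(pwm, &state);	/* driver programs a duty of 0 in the EC */

	/*
	 * A subsequent .get_state() callback still reports a 50% duty cycle
	 * from the cached value, even though the EC itself reads back 0.
	 */
}
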
@@ -149,6 +197,8 @@ cros_ec_pwm_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
 }
 
 static const struct pwm_ops cros_ec_pwm_ops = {
+       .request = cros_ec_pwm_request,
+       .free = cros_ec_pwm_free,
        .get_state      = cros_ec_pwm_get_state,
        .apply          = cros_ec_pwm_apply,
        .owner          = THIS_MODULE,
index ae11d85..35a7ac4 100644 (file)
@@ -85,6 +85,13 @@ struct pwm_imx27_chip {
        struct clk      *clk_per;
        void __iomem    *mmio_base;
        struct pwm_chip chip;
+
+       /*
+        * The driver cannot read the current duty cycle from the hardware if
+        * the hardware is disabled. Cache the last programmed duty cycle
+        * value to return in that case.
+        */
+       unsigned int duty_cycle;
 };
 
 #define to_pwm_imx27_chip(chip)        container_of(chip, struct pwm_imx27_chip, chip)
@@ -155,14 +162,17 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
        tmp = NSEC_PER_SEC * (u64)(period + 2);
        state->period = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
 
-       /* PWMSAR can be read only if PWM is enabled */
-       if (state->enabled) {
+       /*
+        * PWMSAR can be read only if PWM is enabled. If the PWM is disabled,
+        * use the cached value.
+        */
+       if (state->enabled)
                val = readl(imx->mmio_base + MX3_PWMSAR);
-               tmp = NSEC_PER_SEC * (u64)(val);
-               state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
-       } else {
-               state->duty_cycle = 0;
-       }
+       else
+               val = imx->duty_cycle;
+
+       tmp = NSEC_PER_SEC * (u64)(val);
+       state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
 
        if (!state->enabled)
                pwm_imx27_clk_disable_unprepare(chip);
@@ -220,63 +230,68 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
        pwm_get_state(pwm, &cstate);
 
-       if (state->enabled) {
-               c = clk_get_rate(imx->clk_per);
-               c *= state->period;
-
-               do_div(c, 1000000000);
-               period_cycles = c;
-
-               prescale = period_cycles / 0x10000 + 1;
-
-               period_cycles /= prescale;
-               c = (unsigned long long)period_cycles * state->duty_cycle;
-               do_div(c, state->period);
-               duty_cycles = c;
-
-               /*
-                * according to imx pwm RM, the real period value should be
-                * PERIOD value in PWMPR plus 2.
-                */
-               if (period_cycles > 2)
-                       period_cycles -= 2;
-               else
-                       period_cycles = 0;
-
-               /*
-                * Wait for a free FIFO slot if the PWM is already enabled, and
-                * flush the FIFO if the PWM was disabled and is about to be
-                * enabled.
-                */
-               if (cstate.enabled) {
-                       pwm_imx27_wait_fifo_slot(chip, pwm);
-               } else {
-                       ret = pwm_imx27_clk_prepare_enable(chip);
-                       if (ret)
-                               return ret;
-
-                       pwm_imx27_sw_reset(chip);
-               }
-
-               writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
-               writel(period_cycles, imx->mmio_base + MX3_PWMPR);
-
-               cr = MX3_PWMCR_PRESCALER_SET(prescale) |
-                    MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEN | MX3_PWMCR_WAITEN |
-                    FIELD_PREP(MX3_PWMCR_CLKSRC, MX3_PWMCR_CLKSRC_IPG_HIGH) |
-                    MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
-
-               if (state->polarity == PWM_POLARITY_INVERSED)
-                       cr |= FIELD_PREP(MX3_PWMCR_POUTC,
-                                       MX3_PWMCR_POUTC_INVERTED);
-
-               writel(cr, imx->mmio_base + MX3_PWMCR);
-       } else if (cstate.enabled) {
-               writel(0, imx->mmio_base + MX3_PWMCR);
+       c = clk_get_rate(imx->clk_per);
+       c *= state->period;
 
-               pwm_imx27_clk_disable_unprepare(chip);
+       do_div(c, 1000000000);
+       period_cycles = c;
+
+       prescale = period_cycles / 0x10000 + 1;
+
+       period_cycles /= prescale;
+       c = (unsigned long long)period_cycles * state->duty_cycle;
+       do_div(c, state->period);
+       duty_cycles = c;
+
+       /*
+        * According to the i.MX PWM reference manual, the real period value
+        * is the PERIOD value in PWMPR plus 2.
+        */
+       if (period_cycles > 2)
+               period_cycles -= 2;
+       else
+               period_cycles = 0;
+
+       /*
+        * Wait for a free FIFO slot if the PWM is already enabled, and flush
+        * the FIFO if the PWM was disabled and is about to be enabled.
+        */
+       if (cstate.enabled) {
+               pwm_imx27_wait_fifo_slot(chip, pwm);
+       } else {
+               ret = pwm_imx27_clk_prepare_enable(chip);
+               if (ret)
+                       return ret;
+
+               pwm_imx27_sw_reset(chip);
        }
 
+       writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
+       writel(period_cycles, imx->mmio_base + MX3_PWMPR);
+
+       /*
+        * Store the duty cycle for future reference in cases where the
+        * MX3_PWMSAR register can't be read (i.e. when the PWM is disabled).
+        */
+       imx->duty_cycle = duty_cycles;
+
+       cr = MX3_PWMCR_PRESCALER_SET(prescale) |
+            MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEN | MX3_PWMCR_WAITEN |
+            FIELD_PREP(MX3_PWMCR_CLKSRC, MX3_PWMCR_CLKSRC_IPG_HIGH) |
+            MX3_PWMCR_DBGEN;
+
+       if (state->polarity == PWM_POLARITY_INVERSED)
+               cr |= FIELD_PREP(MX3_PWMCR_POUTC,
+                               MX3_PWMCR_POUTC_INVERTED);
+
+       if (state->enabled)
+               cr |= MX3_PWMCR_EN;
+
+       writel(cr, imx->mmio_base + MX3_PWMCR);
+
+       if (!state->enabled && cstate.enabled)
+               pwm_imx27_clk_disable_unprepare(chip);
+
        return 0;
 }
 
@@ -304,9 +319,13 @@ static int pwm_imx27_probe(struct platform_device *pdev)
 
        imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(imx->clk_ipg)) {
-               dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
-                               PTR_ERR(imx->clk_ipg));
-               return PTR_ERR(imx->clk_ipg);
+               int ret = PTR_ERR(imx->clk_ipg);
+
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "getting ipg clock failed with %d\n",
+                               ret);
+               return ret;
        }
 
        imx->clk_per = devm_clk_get(&pdev->dev, "per");
index b14376b..f2e57fc 100644 (file)
 #define  PERIOD_PERIOD(p)      ((p) & 0xffff)
 #define  PERIOD_PERIOD_MAX     0x10000
 #define  PERIOD_ACTIVE_HIGH    (3 << 16)
+#define  PERIOD_ACTIVE_LOW     (2 << 16)
+#define  PERIOD_INACTIVE_HIGH  (3 << 18)
 #define  PERIOD_INACTIVE_LOW   (2 << 18)
+#define  PERIOD_POLARITY_NORMAL        (PERIOD_ACTIVE_HIGH | PERIOD_INACTIVE_LOW)
+#define  PERIOD_POLARITY_INVERSE       (PERIOD_ACTIVE_LOW | PERIOD_INACTIVE_HIGH)
 #define  PERIOD_CDIV(div)      (((div) & 0x7) << 20)
 #define  PERIOD_CDIV_MAX       8
 
-static const unsigned int cdiv[PERIOD_CDIV_MAX] = {
-       1, 2, 4, 8, 16, 64, 256, 1024
+static const u8 cdiv_shift[PERIOD_CDIV_MAX] = {
+       0, 1, 2, 3, 4, 6, 8, 10
 };
 
 struct mxs_pwm_chip {
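
The replacement of the divider table by cdiv_shift[] above works because every previous divider was a power of two; the new entries are simply the exponents, so rate / cdiv[i] becomes rate >> cdiv_shift[i]. A tiny self-contained check of that equivalence (hypothetical, not part of the patch):

static const unsigned int old_cdiv[8] = { 1, 2, 4, 8, 16, 64, 256, 1024 };
static const unsigned char new_cdiv_shift[8] = { 0, 1, 2, 3, 4, 6, 8, 10 };

static int example_cdiv_tables_match(void)
{
	int i;

	for (i = 0; i < 8; i++)
		if ((1u << new_cdiv_shift[i]) != old_cdiv[i])
			return -1;	/* never taken: both tables describe the same dividers */

	return 0;
}
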
@@ -41,19 +45,34 @@ struct mxs_pwm_chip {
 
 #define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip)
 
-static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-                         int duty_ns, int period_ns)
+static int mxs_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                        const struct pwm_state *state)
 {
        struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
        int ret, div = 0;
        unsigned int period_cycles, duty_cycles;
        unsigned long rate;
        unsigned long long c;
+       unsigned int pol_bits;
+
+       /*
+        * If the PWM channel is disabled, make sure to turn on the
+        * clock before calling clk_get_rate() and writing to the
+        * registers. Otherwise, just keep it enabled.
+        */
+       if (!pwm_is_enabled(pwm)) {
+               ret = clk_prepare_enable(mxs->clk);
+               if (ret)
+                       return ret;
+       }
+
+       if (!state->enabled && pwm_is_enabled(pwm))
+               writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR);
 
        rate = clk_get_rate(mxs->clk);
        while (1) {
-               c = rate / cdiv[div];
-               c = c * period_ns;
+               c = rate >> cdiv_shift[div];
+               c = c * state->period;
                do_div(c, 1000000000);
                if (c < PERIOD_PERIOD_MAX)
                        break;
@@ -63,62 +82,40 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        }
 
        period_cycles = c;
-       c *= duty_ns;
-       do_div(c, period_ns);
+       c *= state->duty_cycle;
+       do_div(c, state->period);
        duty_cycles = c;
 
        /*
-        * If the PWM channel is disabled, make sure to turn on the clock
-        * before writing the register. Otherwise, keep it enabled.
+        * The data sheet says the registers must be written in
+        * this order (ACTIVEn, then PERIODn). Also, the new settings
+        * only take effect at the beginning of a new period, avoiding
+        * glitches.
         */
-       if (!pwm_is_enabled(pwm)) {
-               ret = clk_prepare_enable(mxs->clk);
-               if (ret)
-                       return ret;
-       }
 
+       pol_bits = state->polarity == PWM_POLARITY_NORMAL ?
+               PERIOD_POLARITY_NORMAL : PERIOD_POLARITY_INVERSE;
        writel(duty_cycles << 16,
-                       mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20);
-       writel(PERIOD_PERIOD(period_cycles) | PERIOD_ACTIVE_HIGH |
-              PERIOD_INACTIVE_LOW | PERIOD_CDIV(div),
-                       mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20);
-
-       /*
-        * If the PWM is not enabled, turn the clock off again to save power.
-        */
-       if (!pwm_is_enabled(pwm))
+              mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20);
+       writel(PERIOD_PERIOD(period_cycles) | pol_bits | PERIOD_CDIV(div),
+              mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20);
+
+       if (state->enabled) {
+               if (!pwm_is_enabled(pwm)) {
+                       /*
+                        * The clock was enabled above. Just enable
+                        * the channel in the control register.
+                        */
+                       writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET);
+               }
+       } else {
                clk_disable_unprepare(mxs->clk);
-
-       return 0;
-}
-
-static int mxs_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
-       int ret;
-
-       ret = clk_prepare_enable(mxs->clk);
-       if (ret)
-               return ret;
-
-       writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET);
-
+       }
        return 0;
 }
 
-static void mxs_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
-
-       writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR);
-
-       clk_disable_unprepare(mxs->clk);
-}
-
 static const struct pwm_ops mxs_pwm_ops = {
-       .config = mxs_pwm_config,
-       .enable = mxs_pwm_enable,
-       .disable = mxs_pwm_disable,
+       .apply = mxs_pwm_apply,
        .owner = THIS_MODULE,
 };
 
@@ -142,6 +139,8 @@ static int mxs_pwm_probe(struct platform_device *pdev)
 
        mxs->chip.dev = &pdev->dev;
        mxs->chip.ops = &mxs_pwm_ops;
+       mxs->chip.of_xlate = of_pwm_xlate_with_flags;
+       mxs->chip.of_pwm_n_cells = 3;
        mxs->chip.base = -1;
 
        ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
index 00772fc..88a3c56 100644 (file)
@@ -256,7 +256,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
        if (!timer_pdev) {
                dev_err(&pdev->dev, "Unable to find Timer pdev\n");
                ret = -ENODEV;
-               goto put;
+               goto err_find_timer_pdev;
        }
 
        timer_pdata = dev_get_platdata(&timer_pdev->dev);
@@ -264,7 +264,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
                dev_dbg(&pdev->dev,
                         "dmtimer pdata structure NULL, deferring probe\n");
                ret = -EPROBE_DEFER;
-               goto put;
+               goto err_platdata;
        }
 
        pdata = timer_pdata->timer_ops;
@@ -283,30 +283,25 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
            !pdata->write_counter) {
                dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
                ret = -EINVAL;
-               goto put;
+               goto err_platdata;
        }
 
        if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
                dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
                ret = -ENODEV;
-               goto put;
+               goto err_timer_property;
        }
 
        dm_timer = pdata->request_by_node(timer);
        if (!dm_timer) {
                ret = -EPROBE_DEFER;
-               goto put;
+               goto err_request_timer;
        }
 
-put:
-       of_node_put(timer);
-       if (ret < 0)
-               return ret;
-
        omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
        if (!omap) {
-               pdata->free(dm_timer);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_alloc_omap;
        }
 
        omap->pdata = pdata;
@@ -339,27 +334,56 @@ put:
        ret = pwmchip_add(&omap->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to register PWM\n");
-               omap->pdata->free(omap->dm_timer);
-               return ret;
+               goto err_pwmchip_add;
        }
 
+       of_node_put(timer);
+
        platform_set_drvdata(pdev, omap);
 
        return 0;
+
+err_pwmchip_add:
+
+       /*
+        * *omap is allocated using devm_kzalloc(), so no explicit
+        * free is necessary here
+        */
+err_alloc_omap:
+
+       pdata->free(dm_timer);
+err_request_timer:
+
+err_timer_property:
+err_platdata:
+
+       put_device(&timer_pdev->dev);
+err_find_timer_pdev:
+
+       of_node_put(timer);
+
+       return ret;
 }
 
 static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
 {
        struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = pwmchip_remove(&omap->chip);
+       if (ret)
+               return ret;
 
        if (pm_runtime_active(&omap->dm_timer_pdev->dev))
                omap->pdata->stop(omap->dm_timer);
 
        omap->pdata->free(omap->dm_timer);
 
+       put_device(&omap->dm_timer_pdev->dev);
+
        mutex_destroy(&omap->mutex);
 
-       return pwmchip_remove(&omap->chip);
+       return 0;
 }
 
 static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
index 168684b..b07bdca 100644 (file)
@@ -159,13 +159,9 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
 static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
 {
        struct pca9685 *pca = gpiochip_get_data(gpio);
-       struct pwm_device *pwm;
 
        pca9685_pwm_gpio_set(gpio, offset, 0);
        pm_runtime_put(pca->chip.dev);
-       mutex_lock(&pca->lock);
-       pwm = &pca->chip.pwms[offset];
-       mutex_unlock(&pca->lock);
 }
 
 static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
index 852eb23..2685577 100644 (file)
@@ -3,6 +3,9 @@
  * R-Car PWM Timer driver
  *
  * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Limitations:
+ * - The hardware cannot generate a 0% duty cycle.
  */
 
 #include <linux/clk.h>
@@ -161,11 +164,9 @@ static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                          const struct pwm_state *state)
 {
        struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
-       struct pwm_state cur_state;
        int div, ret;
 
        /* This HW/driver only supports normal polarity */
-       pwm_get_state(pwm, &cur_state);
        if (state->polarity != PWM_POLARITY_NORMAL)
                return -ENOTSUPP;
 
index 7ff48c1..d3be944 100644 (file)
@@ -377,9 +377,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
        else
                regmap_update_bits(priv->regmap, TIM_CCMR2, mask, ccmr);
 
-       regmap_update_bits(priv->regmap, TIM_BDTR,
-                          TIM_BDTR_MOE | TIM_BDTR_AOE,
-                          TIM_BDTR_MOE | TIM_BDTR_AOE);
+       regmap_update_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE, TIM_BDTR_MOE);
 
        return 0;
 }
index 581d232..3e3efa6 100644 (file)
@@ -3,6 +3,10 @@
  * Driver for Allwinner sun4i Pulse Width Modulation Controller
  *
  * Copyright (C) 2014 Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * Limitations:
+ * - When outputting the source clock directly, the PWM logic will be bypassed
+ *   and the currently running period is not guaranteed to be completed
  */
 
 #include <linux/bitops.h>
@@ -16,6 +20,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/time.h>
@@ -72,12 +77,15 @@ static const u32 prescaler_table[] = {
 
 struct sun4i_pwm_data {
        bool has_prescaler_bypass;
+       bool has_direct_mod_clk_output;
        unsigned int npwm;
 };
 
 struct sun4i_pwm_chip {
        struct pwm_chip chip;
+       struct clk *bus_clk;
        struct clk *clk;
+       struct reset_control *rst;
        void __iomem *base;
        spinlock_t ctrl_lock;
        const struct sun4i_pwm_data *data;
@@ -115,6 +123,20 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
 
        val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
 
+       /*
+        * The PWM chapter in the H6 manual contains a diagram which explains
+        * that if the bypass bit is set, no other setting has any meaning.
+        * Experiments also showed that the enable bit is ignored in that case.
+        */
+       if ((val & BIT_CH(PWM_BYPASS, pwm->hwpwm)) &&
+           sun4i_pwm->data->has_direct_mod_clk_output) {
+               state->period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, clk_rate);
+               state->duty_cycle = DIV_ROUND_UP_ULL(state->period, 2);
+               state->polarity = PWM_POLARITY_NORMAL;
+               state->enabled = true;
+               return;
+       }
+
        if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
            sun4i_pwm->data->has_prescaler_bypass)
                prescaler = 1;
@@ -146,13 +168,24 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
 
 static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
                               const struct pwm_state *state,
-                              u32 *dty, u32 *prd, unsigned int *prsclr)
+                              u32 *dty, u32 *prd, unsigned int *prsclr,
+                              bool *bypass)
 {
        u64 clk_rate, div = 0;
-       unsigned int pval, prescaler = 0;
+       unsigned int prescaler = 0;
 
        clk_rate = clk_get_rate(sun4i_pwm->clk);
 
+       *bypass = sun4i_pwm->data->has_direct_mod_clk_output &&
+                 state->enabled &&
+                 (state->period * clk_rate >= NSEC_PER_SEC) &&
+                 (state->period * clk_rate < 2 * NSEC_PER_SEC) &&
+                 (state->duty_cycle * clk_rate * 2 >= NSEC_PER_SEC);
+
+       /* Skip calculation of other parameters if we bypass them */
+       if (*bypass)
+               return 0;
+
        if (sun4i_pwm->data->has_prescaler_bypass) {
                /* First, test without any prescaler when available */
                prescaler = PWM_PRESCAL_MASK;
@@ -170,9 +203,11 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
        if (prescaler == 0) {
                /* Go up from the first divider */
                for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
-                       if (!prescaler_table[prescaler])
+                       unsigned int pval = prescaler_table[prescaler];
+
+                       if (!pval)
                                continue;
-                       pval = prescaler_table[prescaler];
+
                        div = clk_rate;
                        do_div(div, pval);
                        div = div * state->period;
@@ -199,10 +234,11 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 {
        struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
        struct pwm_state cstate;
-       u32 ctrl;
+       u32 ctrl, duty = 0, period = 0, val;
        int ret;
-       unsigned int delay_us;
+       unsigned int delay_us, prescaler = 0;
        unsigned long now;
+       bool bypass;
 
        pwm_get_state(pwm, &cstate);
 
@@ -214,46 +250,52 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                }
        }
 
+       ret = sun4i_pwm_calculate(sun4i_pwm, state, &duty, &period, &prescaler,
+                                 &bypass);
+       if (ret) {
+               dev_err(chip->dev, "period exceeds the maximum value\n");
+               if (!cstate.enabled)
+                       clk_disable_unprepare(sun4i_pwm->clk);
+               return ret;
+       }
+
        spin_lock(&sun4i_pwm->ctrl_lock);
        ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
 
-       if ((cstate.period != state->period) ||
-           (cstate.duty_cycle != state->duty_cycle)) {
-               u32 period, duty, val;
-               unsigned int prescaler;
-
-               ret = sun4i_pwm_calculate(sun4i_pwm, state,
-                                         &duty, &period, &prescaler);
-               if (ret) {
-                       dev_err(chip->dev, "period exceeds the maximum value\n");
+       if (sun4i_pwm->data->has_direct_mod_clk_output) {
+               if (bypass) {
+                       ctrl |= BIT_CH(PWM_BYPASS, pwm->hwpwm);
+                       /* We can skip the other parameters */
+                       sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
                        spin_unlock(&sun4i_pwm->ctrl_lock);
-                       if (!cstate.enabled)
-                               clk_disable_unprepare(sun4i_pwm->clk);
-                       return ret;
+                       return 0;
                }
 
-               if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
-                       /* Prescaler changed, the clock has to be gated */
-                       ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
-                       sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+               ctrl &= ~BIT_CH(PWM_BYPASS, pwm->hwpwm);
+       }
 
-                       ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
-                       ctrl |= BIT_CH(prescaler, pwm->hwpwm);
-               }
+       if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
+               /* Prescaler changed, the clock has to be gated */
+               ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+               sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
 
-               val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
-               sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
-               sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
-                       usecs_to_jiffies(cstate.period / 1000 + 1);
-               sun4i_pwm->needs_delay[pwm->hwpwm] = true;
+               ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
+               ctrl |= BIT_CH(prescaler, pwm->hwpwm);
        }
 
+       val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
+       sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+       sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
+               usecs_to_jiffies(cstate.period / 1000 + 1);
+       sun4i_pwm->needs_delay[pwm->hwpwm] = true;
+
        if (state->polarity != PWM_POLARITY_NORMAL)
                ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
        else
                ctrl |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
 
        ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+
        if (state->enabled) {
                ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
        } else if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
@@ -319,6 +361,12 @@ static const struct sun4i_pwm_data sun4i_pwm_single_bypass = {
        .npwm = 1,
 };
 
+static const struct sun4i_pwm_data sun50i_h6_pwm_data = {
+       .has_prescaler_bypass = true,
+       .has_direct_mod_clk_output = true,
+       .npwm = 2,
+};
+
 static const struct of_device_id sun4i_pwm_dt_ids[] = {
        {
                .compatible = "allwinner,sun4i-a10-pwm",
@@ -335,6 +383,9 @@ static const struct of_device_id sun4i_pwm_dt_ids[] = {
        }, {
                .compatible = "allwinner,sun8i-h3-pwm",
                .data = &sun4i_pwm_single_bypass,
+       }, {
+               .compatible = "allwinner,sun50i-h6-pwm",
+               .data = &sun50i_h6_pwm_data,
        }, {
                /* sentinel */
        },
@@ -360,9 +411,69 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(pwm->base))
                return PTR_ERR(pwm->base);
 
-       pwm->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(pwm->clk))
+       /*
+        * All hardware variants need a source clock that is divided and
+        * then feeds the counter that defines the output wave form. In the
+        * device tree this clock is either unnamed or called "mod".
+        * Some variants (e.g. H6) need another clock to access the
+        * hardware registers; this is called "bus".
+        * So we request "mod" first (and ignore the corner case that a
+        * parent provides a "mod" clock while the right one would be the
+        * unnamed one of the PWM device) and if this is not found we fall
+        * back to the first clock of the PWM.
+        */
+       pwm->clk = devm_clk_get_optional(&pdev->dev, "mod");
+       if (IS_ERR(pwm->clk)) {
+               if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "get mod clock failed %pe\n",
+                               pwm->clk);
                return PTR_ERR(pwm->clk);
+       }
+
+       if (!pwm->clk) {
+               pwm->clk = devm_clk_get(&pdev->dev, NULL);
+               if (IS_ERR(pwm->clk)) {
+                       if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
+                               dev_err(&pdev->dev, "get unnamed clock failed %pe\n",
+                                       pwm->clk);
+                       return PTR_ERR(pwm->clk);
+               }
+       }
+
+       pwm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
+       if (IS_ERR(pwm->bus_clk)) {
+               if (PTR_ERR(pwm->bus_clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "get bus clock failed %pe\n",
+                               pwm->bus_clk);
+               return PTR_ERR(pwm->bus_clk);
+       }
+
+       pwm->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+       if (IS_ERR(pwm->rst)) {
+               if (PTR_ERR(pwm->rst) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "get reset failed %pe\n",
+                               pwm->rst);
+               return PTR_ERR(pwm->rst);
+       }
+
+       /* Deassert reset */
+       ret = reset_control_deassert(pwm->rst);
+       if (ret) {
+               dev_err(&pdev->dev, "cannot deassert reset control: %pe\n",
+                       ERR_PTR(ret));
+               return ret;
+       }
+
+       /*
+        * We're keeping the bus clock on for the sake of simplicity.
+        * Actually it only needs to be on for hardware register accesses.
+        */
+       ret = clk_prepare_enable(pwm->bus_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "cannot prepare and enable bus_clk %pe\n",
+                       ERR_PTR(ret));
+               goto err_bus;
+       }
 
        pwm->chip.dev = &pdev->dev;
        pwm->chip.ops = &sun4i_pwm_ops;
@@ -376,19 +487,34 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
        ret = pwmchip_add(&pwm->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
-               return ret;
+               goto err_pwm_add;
        }
 
        platform_set_drvdata(pdev, pwm);
 
        return 0;
+
+err_pwm_add:
+       clk_disable_unprepare(pwm->bus_clk);
+err_bus:
+       reset_control_assert(pwm->rst);
+
+       return ret;
 }
 
 static int sun4i_pwm_remove(struct platform_device *pdev)
 {
        struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = pwmchip_remove(&pwm->chip);
+       if (ret)
+               return ret;
+
+       clk_disable_unprepare(pwm->bus_clk);
+       reset_control_assert(pwm->rst);
 
-       return pwmchip_remove(&pwm->chip);
+       return 0;
 }
 
 static struct platform_driver sun4i_pwm_driver = {
index 94afdde..de3862c 100644 (file)
@@ -23,6 +23,16 @@ config IMX_REMOTEPROC
 
          It's safe to say N here.
 
+config MTK_SCP
+       tristate "Mediatek SCP support"
+       depends on ARCH_MEDIATEK
+       select RPMSG_MTK_SCP
+       help
+         Say y here to support Mediatek's System Companion Processor (SCP) via
+         the remote processor framework.
+
+         It's safe to say N here.
+
 config OMAP_REMOTEPROC
        tristate "OMAP remoteproc support"
        depends on ARCH_OMAP4 || SOC_OMAP5
index 00f09e6..e30a1b1 100644 (file)
@@ -10,6 +10,7 @@ remoteproc-y                          += remoteproc_sysfs.o
 remoteproc-y                           += remoteproc_virtio.o
 remoteproc-y                           += remoteproc_elf_loader.o
 obj-$(CONFIG_IMX_REMOTEPROC)           += imx_rproc.o
+obj-$(CONFIG_MTK_SCP)                  += mtk_scp.o mtk_scp_ipi.o
 obj-$(CONFIG_OMAP_REMOTEPROC)          += omap_remoteproc.o
 obj-$(CONFIG_WKUP_M3_RPROC)            += wkup_m3_rproc.o
 obj-$(CONFIG_DA8XX_REMOTEPROC)         += da8xx_remoteproc.o
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
new file mode 100644 (file)
index 0000000..deb2009
--- /dev/null
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __RPROC_MTK_COMMON_H
+#define __RPROC_MTK_COMMON_H
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#define MT8183_SW_RSTN                 0x0
+#define MT8183_SW_RSTN_BIT             BIT(0)
+#define MT8183_SCP_TO_HOST             0x1C
+#define MT8183_SCP_IPC_INT_BIT         BIT(0)
+#define MT8183_SCP_WDT_INT_BIT         BIT(8)
+#define MT8183_HOST_TO_SCP             0x28
+#define MT8183_HOST_IPC_INT_BIT                BIT(0)
+#define MT8183_WDT_CFG                 0x84
+#define MT8183_SCP_CLK_SW_SEL          0x4000
+#define MT8183_SCP_CLK_DIV_SEL         0x4024
+#define MT8183_SCP_SRAM_PDN            0x402C
+#define MT8183_SCP_L1_SRAM_PD          0x4080
+#define MT8183_SCP_TCM_TAIL_SRAM_PD    0x4094
+
+#define MT8183_SCP_CACHE_SEL(x)                (0x14000 + (x) * 0x3000)
+#define MT8183_SCP_CACHE_CON           MT8183_SCP_CACHE_SEL(0)
+#define MT8183_SCP_DCACHE_CON          MT8183_SCP_CACHE_SEL(1)
+#define MT8183_SCP_CACHESIZE_8KB       BIT(8)
+#define MT8183_SCP_CACHE_CON_WAYEN     BIT(10)
+
+#define SCP_FW_VER_LEN                 32
+#define SCP_SHARE_BUFFER_SIZE          288
+
+struct scp_run {
+       u32 signaled;
+       s8 fw_ver[SCP_FW_VER_LEN];
+       u32 dec_capability;
+       u32 enc_capability;
+       wait_queue_head_t wq;
+};
+
+struct scp_ipi_desc {
+       /* For protecting handler. */
+       struct mutex lock;
+       scp_ipi_handler_t handler;
+       void *priv;
+};
+
+struct mtk_scp {
+       struct device *dev;
+       struct rproc *rproc;
+       struct clk *clk;
+       void __iomem *reg_base;
+       void __iomem *sram_base;
+       size_t sram_size;
+
+       struct mtk_share_obj __iomem *recv_buf;
+       struct mtk_share_obj __iomem *send_buf;
+       struct scp_run run;
+       /* To prevent multiple ipi_send run concurrently. */
+       struct mutex send_lock;
+       struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
+       bool ipi_id_ack[SCP_IPI_MAX];
+       wait_queue_head_t ack_wq;
+
+       void __iomem *cpu_addr;
+       phys_addr_t phys_addr;
+       size_t dram_size;
+
+       struct rproc_subdev *rpmsg_subdev;
+};
+
+/**
+ * struct mtk_share_obj - SRAM buffer shared between the AP and the SCP
+ *
+ * @id:                IPI id
+ * @len:       share buffer length
+ * @share_buf: share buffer data
+ */
+struct mtk_share_obj {
+       u32 id;
+       u32 len;
+       u8 share_buf[SCP_SHARE_BUFFER_SIZE];
+};
+
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);
+void scp_ipi_lock(struct mtk_scp *scp, u32 id);
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id);
+
+#endif
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
new file mode 100644 (file)
index 0000000..7ccdf64
--- /dev/null
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+
+#include "mtk_common.h"
+#include "remoteproc_internal.h"
+
+#define MAX_CODE_SIZE 0x500000
+#define SCP_FW_END 0x7C000
+
+/**
+ * scp_get() - get a reference to SCP.
+ *
+ * @pdev:      the platform device of the module requesting the SCP platform
+ *             device in order to use the SCP API.
+ *
+ * Return: NULL on failure, otherwise a reference to the SCP.
+ **/
+struct mtk_scp *scp_get(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *scp_node;
+       struct platform_device *scp_pdev;
+
+       scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
+       if (!scp_node) {
+               dev_err(dev, "can't get SCP node\n");
+               return NULL;
+       }
+
+       scp_pdev = of_find_device_by_node(scp_node);
+       of_node_put(scp_node);
+
+       if (WARN_ON(!scp_pdev)) {
+               dev_err(dev, "SCP pdev failed\n");
+               return NULL;
+       }
+
+       return platform_get_drvdata(scp_pdev);
+}
+EXPORT_SYMBOL_GPL(scp_get);
+
+/**
+ * scp_put() - release the SCP reference obtained by scp_get()
+ *
+ * @scp:       mtk_scp structure from scp_get().
+ **/
+void scp_put(struct mtk_scp *scp)
+{
+       put_device(scp->dev);
+}
+EXPORT_SYMBOL_GPL(scp_put);
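+
+/*
+ * Example (hypothetical, not part of this driver): a client driver whose DT
+ * node carries a "mediatek,scp" phandle pairs scp_get() and scp_put() around
+ * its use of the SCP, for instance:
+ *
+ *	static int example_client_probe(struct platform_device *pdev)
+ *	{
+ *		struct mtk_scp *scp = scp_get(pdev);
+ *
+ *		if (!scp)
+ *			return -EPROBE_DEFER;
+ *
+ *		dev_info(&pdev->dev, "using SCP %s\n",
+ *			 dev_name(scp_get_device(scp)));
+ *
+ *		scp_put(scp);
+ *		return 0;
+ *	}
+ */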
+
+static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
+{
+       dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
+       rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
+}
+
+static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+       struct mtk_scp *scp = (struct mtk_scp *)priv;
+       struct scp_run *run = (struct scp_run *)data;
+
+       scp->run.signaled = run->signaled;
+       strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
+       scp->run.dec_capability = run->dec_capability;
+       scp->run.enc_capability = run->enc_capability;
+       wake_up_interruptible(&scp->run.wq);
+}
+
+static void scp_ipi_handler(struct mtk_scp *scp)
+{
+       struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
+       struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
+       u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
+       scp_ipi_handler_t handler;
+       u32 id = readl(&rcv_obj->id);
+       u32 len = readl(&rcv_obj->len);
+
+       if (len > SCP_SHARE_BUFFER_SIZE) {
+               dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
+                       SCP_SHARE_BUFFER_SIZE);
+               return;
+       }
+       if (id >= SCP_IPI_MAX) {
+               dev_err(scp->dev, "No such ipi id = %d\n", id);
+               return;
+       }
+
+       scp_ipi_lock(scp, id);
+       handler = ipi_desc[id].handler;
+       if (!handler) {
+               dev_err(scp->dev, "No such ipi id = %d\n", id);
+               scp_ipi_unlock(scp, id);
+               return;
+       }
+
+       memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
+       handler(tmp_data, len, ipi_desc[id].priv);
+       scp_ipi_unlock(scp, id);
+
+       scp->ipi_id_ack[id] = true;
+       wake_up(&scp->ack_wq);
+}
+
+static int scp_ipi_init(struct mtk_scp *scp)
+{
+       size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
+       size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);
+
+       /* Disable SCP to host interrupt */
+       writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+       /* shared buffer initialization */
+       scp->recv_buf =
+               (struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
+       scp->send_buf =
+               (struct mtk_share_obj __iomem *)(scp->sram_base + send_offset);
+       memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
+       memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
+
+       return 0;
+}
+
+static void scp_reset_assert(const struct mtk_scp *scp)
+{
+       u32 val;
+
+       val = readl(scp->reg_base + MT8183_SW_RSTN);
+       val &= ~MT8183_SW_RSTN_BIT;
+       writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static void scp_reset_deassert(const struct mtk_scp *scp)
+{
+       u32 val;
+
+       val = readl(scp->reg_base + MT8183_SW_RSTN);
+       val |= MT8183_SW_RSTN_BIT;
+       writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static irqreturn_t scp_irq_handler(int irq, void *priv)
+{
+       struct mtk_scp *scp = priv;
+       u32 scp_to_host;
+       int ret;
+
+       ret = clk_prepare_enable(scp->clk);
+       if (ret) {
+               dev_err(scp->dev, "failed to enable clocks\n");
+               return IRQ_NONE;
+       }
+
+       scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
+       if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
+               scp_ipi_handler(scp);
+       else
+               scp_wdt_handler(scp, scp_to_host);
+
+       /* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
+       writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
+              scp->reg_base + MT8183_SCP_TO_HOST);
+       clk_disable_unprepare(scp->clk);
+
+       return IRQ_HANDLED;
+}
+
+static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+       struct device *dev = &rproc->dev;
+       struct elf32_hdr *ehdr;
+       struct elf32_phdr *phdr;
+       int i, ret = 0;
+       const u8 *elf_data = fw->data;
+
+       ehdr = (struct elf32_hdr *)elf_data;
+       phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+       /* go through the available ELF segments */
+       for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+               u32 da = phdr->p_paddr;
+               u32 memsz = phdr->p_memsz;
+               u32 filesz = phdr->p_filesz;
+               u32 offset = phdr->p_offset;
+               void __iomem *ptr;
+
+               if (phdr->p_type != PT_LOAD)
+                       continue;
+
+               dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+                       phdr->p_type, da, memsz, filesz);
+
+               if (filesz > memsz) {
+                       dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+                               filesz, memsz);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (offset + filesz > fw->size) {
+                       dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+                               offset + filesz, fw->size);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               /* grab the kernel address for this device address */
+               ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz);
+               if (!ptr) {
+                       dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               /* put the segment where the remote processor expects it */
+               if (phdr->p_filesz)
+                       scp_memcpy_aligned(ptr, elf_data + phdr->p_offset,
+                                          filesz);
+       }
+
+       return ret;
+}
+
+static int scp_load(struct rproc *rproc, const struct firmware *fw)
+{
+       const struct mtk_scp *scp = rproc->priv;
+       struct device *dev = scp->dev;
+       int ret;
+
+       ret = clk_prepare_enable(scp->clk);
+       if (ret) {
+               dev_err(dev, "failed to enable clocks\n");
+               return ret;
+       }
+
+       /* Hold SCP in reset while loading FW. */
+       scp_reset_assert(scp);
+
+       /* Reset clocks before loading FW */
+       writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+       writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+       /* Initialize TCM before loading FW. */
+       writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+       writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+
+       /* Turn on the power of SCP's SRAM before using it. */
+       writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
+
+       /*
+        * Set I-cache and D-cache size before loading SCP FW.
+        * SCP SRAM logical address may change when cache size setting differs.
+        */
+       writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+              scp->reg_base + MT8183_SCP_CACHE_CON);
+       writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
+       ret = scp_elf_load_segments(rproc, fw);
+       clk_disable_unprepare(scp->clk);
+
+       return ret;
+}
+
+static int scp_start(struct rproc *rproc)
+{
+       struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+       struct device *dev = scp->dev;
+       struct scp_run *run = &scp->run;
+       int ret;
+
+       ret = clk_prepare_enable(scp->clk);
+       if (ret) {
+               dev_err(dev, "failed to enable clocks\n");
+               return ret;
+       }
+
+       run->signaled = false;
+
+       scp_reset_deassert(scp);
+
+       ret = wait_event_interruptible_timeout(
+                                       run->wq,
+                                       run->signaled,
+                                       msecs_to_jiffies(2000));
+
+       if (ret == 0) {
+               dev_err(dev, "wait SCP initialization timeout!\n");
+               ret = -ETIME;
+               goto stop;
+       }
+       if (ret == -ERESTARTSYS) {
+               dev_err(dev, "wait SCP interrupted by a signal!\n");
+               goto stop;
+       }
+       clk_disable_unprepare(scp->clk);
+       dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
+
+       return 0;
+
+stop:
+       scp_reset_assert(scp);
+       clk_disable_unprepare(scp->clk);
+       return ret;
+}
+
+static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+       struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+       int offset;
+
+       if (da < scp->sram_size) {
+               offset = da;
+               if (offset >= 0 && (offset + len) < scp->sram_size)
+                       return (void __force *)scp->sram_base + offset;
+       } else {
+               offset = da - scp->phys_addr;
+               if (offset >= 0 && (offset + len) < scp->dram_size)
+                       return (void __force *)scp->cpu_addr + offset;
+       }
+
+       return NULL;
+}
+
+static int scp_stop(struct rproc *rproc)
+{
+       struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+       int ret;
+
+       ret = clk_prepare_enable(scp->clk);
+       if (ret) {
+               dev_err(scp->dev, "failed to enable clocks\n");
+               return ret;
+       }
+
+       scp_reset_assert(scp);
+       /* Disable SCP watchdog */
+       writel(0, scp->reg_base + MT8183_WDT_CFG);
+       clk_disable_unprepare(scp->clk);
+
+       return 0;
+}
+
+static const struct rproc_ops scp_ops = {
+       .start          = scp_start,
+       .stop           = scp_stop,
+       .load           = scp_load,
+       .da_to_va       = scp_da_to_va,
+};
+
+/**
+ * scp_get_device() - get device struct of SCP
+ *
+ * @scp:       mtk_scp structure
+ **/
+struct device *scp_get_device(struct mtk_scp *scp)
+{
+       return scp->dev;
+}
+EXPORT_SYMBOL_GPL(scp_get_device);
+
+/**
+ * scp_get_rproc() - get rproc struct of SCP
+ *
+ * @scp:       mtk_scp structure
+ **/
+struct rproc *scp_get_rproc(struct mtk_scp *scp)
+{
+       return scp->rproc;
+}
+EXPORT_SYMBOL_GPL(scp_get_rproc);
+
+/**
+ * scp_get_vdec_hw_capa() - get video decoder hardware capability
+ *
+ * @scp:       mtk_scp structure
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
+{
+       return scp->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
+
+/**
+ * scp_get_venc_hw_capa() - get video encoder hardware capability
+ *
+ * @scp:       mtk_scp structure
+ *
+ * Return: video encoder hardware capability
+ **/
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
+{
+       return scp->run.enc_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
+
+/**
+ * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
+ *
+ * @scp:       mtk_scp structure
+ * @mem_addr:  memory address as seen by the SCP
+ *
+ * Map the SCP's SRAM address, DMEM (Data Extended Memory) address or
+ * working buffer address to a kernel virtual address.
+ *
+ * Return: ERR_PTR(-EINVAL) if the mapping failed, otherwise the mapped
+ * kernel virtual address
+ **/
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
+{
+       void *ptr;
+
+       ptr = scp_da_to_va(scp->rproc, mem_addr, 0);
+       if (!ptr)
+               return ERR_PTR(-EINVAL);
+
+       return ptr;
+}
+EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
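+
+/*
+ * Example (hypothetical): callers check the result with IS_ERR() before
+ * dereferencing it, e.g.:
+ *
+ *	void *va = scp_mapping_dm_addr(scp, mem_addr);
+ *
+ *	if (IS_ERR(va))
+ *		return PTR_ERR(va);
+ */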
+
+static int scp_map_memory_region(struct mtk_scp *scp)
+{
+       int ret;
+
+       ret = of_reserved_mem_device_init(scp->dev);
+       if (ret) {
+               dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
+               return -ENOMEM;
+       }
+
+       /* Reserved SCP code size */
+       scp->dram_size = MAX_CODE_SIZE;
+       scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+                                          &scp->phys_addr, GFP_KERNEL);
+       if (!scp->cpu_addr)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void scp_unmap_memory_region(struct mtk_scp *scp)
+{
+       dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+                         scp->phys_addr);
+       of_reserved_mem_device_release(scp->dev);
+}
+
+static int scp_register_ipi(struct platform_device *pdev, u32 id,
+                           ipi_handler_t handler, void *priv)
+{
+       struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+       return scp_ipi_register(scp, id, handler, priv);
+}
+
+static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
+{
+       struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+       scp_ipi_unregister(scp, id);
+}
+
+static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
+                       unsigned int len, unsigned int wait)
+{
+       struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+       return scp_ipi_send(scp, id, buf, len, wait);
+}
+
+static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
+       .send_ipi = scp_send_ipi,
+       .register_ipi = scp_register_ipi,
+       .unregister_ipi = scp_unregister_ipi,
+       .ns_ipi_id = SCP_IPI_NS_SERVICE,
+};
+
+static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
+{
+       scp->rpmsg_subdev =
+               mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
+                                             &mtk_scp_rpmsg_info);
+       if (scp->rpmsg_subdev)
+               rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
+}
+
+static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
+{
+       if (scp->rpmsg_subdev) {
+               rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
+               mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
+               scp->rpmsg_subdev = NULL;
+       }
+}
+
+static int scp_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct mtk_scp *scp;
+       struct rproc *rproc;
+       struct resource *res;
+       char *fw_name = "scp.img";
+       int ret, i;
+
+       rproc = rproc_alloc(dev,
+                           np->name,
+                           &scp_ops,
+                           fw_name,
+                           sizeof(*scp));
+       if (!rproc) {
+               dev_err(dev, "unable to allocate remoteproc\n");
+               return -ENOMEM;
+       }
+
+       scp = (struct mtk_scp *)rproc->priv;
+       scp->rproc = rproc;
+       scp->dev = dev;
+       platform_set_drvdata(pdev, scp);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+       scp->sram_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR((__force void *)scp->sram_base)) {
+               dev_err(dev, "Failed to parse and map sram memory\n");
+               ret = PTR_ERR((__force void *)scp->sram_base);
+               goto free_rproc;
+       }
+       scp->sram_size = resource_size(res);
+
+       mutex_init(&scp->send_lock);
+       for (i = 0; i < SCP_IPI_MAX; i++)
+               mutex_init(&scp->ipi_desc[i].lock);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+       scp->reg_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR((__force void *)scp->reg_base)) {
+               dev_err(dev, "Failed to parse and map cfg memory\n");
+               ret = PTR_ERR((__force void *)scp->reg_base);
+               goto destroy_mutex;
+       }
+
+       ret = scp_map_memory_region(scp);
+       if (ret)
+               goto destroy_mutex;
+
+       scp->clk = devm_clk_get(dev, "main");
+       if (IS_ERR(scp->clk)) {
+               dev_err(dev, "Failed to get clock\n");
+               ret = PTR_ERR(scp->clk);
+               goto release_dev_mem;
+       }
+
+       ret = clk_prepare_enable(scp->clk);
+       if (ret) {
+               dev_err(dev, "failed to enable clocks\n");
+               goto release_dev_mem;
+       }
+
+       ret = scp_ipi_init(scp);
+       clk_disable_unprepare(scp->clk);
+       if (ret) {
+               dev_err(dev, "Failed to init ipi\n");
+               goto release_dev_mem;
+       }
+
+       /* register SCP initialization IPI */
+       ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
+       if (ret) {
+               dev_err(dev, "Failed to register SCP_IPI_INIT IPI\n");
+               goto release_dev_mem;
+       }
+
+       init_waitqueue_head(&scp->run.wq);
+       init_waitqueue_head(&scp->ack_wq);
+
+       scp_add_rpmsg_subdev(scp);
+
+       ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
+                                       scp_irq_handler, IRQF_ONESHOT,
+                                       pdev->name, scp);
+
+       if (ret) {
+               dev_err(dev, "failed to request irq\n");
+               goto remove_subdev;
+       }
+
+       ret = rproc_add(rproc);
+       if (ret)
+               goto remove_subdev;
+
+       return 0;
+
+remove_subdev:
+       scp_remove_rpmsg_subdev(scp);
+       scp_ipi_unregister(scp, SCP_IPI_INIT);
+release_dev_mem:
+       scp_unmap_memory_region(scp);
+destroy_mutex:
+       for (i = 0; i < SCP_IPI_MAX; i++)
+               mutex_destroy(&scp->ipi_desc[i].lock);
+       mutex_destroy(&scp->send_lock);
+free_rproc:
+       rproc_free(rproc);
+
+       return ret;
+}
+
+static int scp_remove(struct platform_device *pdev)
+{
+       struct mtk_scp *scp = platform_get_drvdata(pdev);
+       int i;
+
+       rproc_del(scp->rproc);
+       scp_remove_rpmsg_subdev(scp);
+       scp_ipi_unregister(scp, SCP_IPI_INIT);
+       scp_unmap_memory_region(scp);
+       for (i = 0; i < SCP_IPI_MAX; i++)
+               mutex_destroy(&scp->ipi_desc[i].lock);
+       mutex_destroy(&scp->send_lock);
+       rproc_free(scp->rproc);
+
+       return 0;
+}
+
+static const struct of_device_id mtk_scp_of_match[] = {
+       { .compatible = "mediatek,mt8183-scp"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
+
+static struct platform_driver mtk_scp_driver = {
+       .probe = scp_probe,
+       .remove = scp_remove,
+       .driver = {
+               .name = "mtk-scp",
+               .of_match_table = of_match_ptr(mtk_scp_of_match),
+       },
+};
+
+module_platform_driver(mtk_scp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP control driver");
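
The helpers exported above (scp_get_device(), scp_get_rproc(), scp_get_vdec_hw_capa(), scp_get_venc_hw_capa() and scp_mapping_dm_addr()) are intended to be called by client drivers that talk to the SCP. A minimal, hypothetical sketch of such a client follows; the function name, the shared-buffer address and the way the struct mtk_scp handle is obtained are assumptions for illustration, not part of this patch.

/* Hypothetical client sketch; assumes a valid struct mtk_scp *scp handle
 * obtained by means outside this patch.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/remoteproc/mtk_scp.h>

static int example_query_scp(struct mtk_scp *scp, u32 scp_buf_addr)
{
        struct device *scp_dev = scp_get_device(scp);
        unsigned int vdec_capa = scp_get_vdec_hw_capa(scp);
        void *buf_va;

        /* Translate an SCP-side address into a kernel virtual address */
        buf_va = scp_mapping_dm_addr(scp, scp_buf_addr);
        if (IS_ERR(buf_va))
                return PTR_ERR(buf_va);

        dev_info(scp_dev, "vdec capability 0x%x, buffer mapped at %p\n",
                 vdec_capa, buf_va);
        return 0;
}
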
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
new file mode 100644 (file)
index 0000000..3d3d872
--- /dev/null
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#include "mtk_common.h"
+
+/**
+ * scp_ipi_register() - register an ipi function
+ *
+ * @scp:       mtk_scp structure
+ * @id:                IPI ID
+ * @handler:   IPI handler
+ * @priv:      private data for IPI handler
+ *
+ * Register an IPI handler to receive IPI interrupts from the SCP.
+ *
+ * Return: 0 on success, a negative errno on failure.
+ */
+int scp_ipi_register(struct mtk_scp *scp,
+                    u32 id,
+                    scp_ipi_handler_t handler,
+                    void *priv)
+{
+       if (!scp) {
+               pr_err("scp device is not ready\n");
+               return -EPROBE_DEFER;
+       }
+
+       if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
+               return -EINVAL;
+
+       scp_ipi_lock(scp, id);
+       scp->ipi_desc[id].handler = handler;
+       scp->ipi_desc[id].priv = priv;
+       scp_ipi_unlock(scp, id);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_register);
+
+/**
+ * scp_ipi_unregister() - unregister an ipi function
+ *
+ * @scp:       mtk_scp structure
+ * @id:                IPI ID
+ *
+ * Unregister an IPI handler so it no longer receives interrupts from the SCP.
+ */
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id)
+{
+       if (!scp)
+               return;
+
+       if (WARN_ON(id >= SCP_IPI_MAX))
+               return;
+
+       scp_ipi_lock(scp, id);
+       scp->ipi_desc[id].handler = NULL;
+       scp->ipi_desc[id].priv = NULL;
+       scp_ipi_unlock(scp, id);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unregister);
+
+/*
+ * scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region.
+ *
+ * @dst:       Pointer to the destination buffer, should be in SCP SRAM region.
+ * @src:       Pointer to the source buffer.
+ * @len:       Length of the source buffer to be copied.
+ *
+ * Since AP access to SCP SRAM does not support byte writes, this always
+ * writes a full word at a time, which may cause some extra bytes to be
+ * written at the beginning and end of dst.
+ */
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len)
+{
+       void __iomem *ptr;
+       u32 val;
+       unsigned int i = 0, remain;
+
+       if (!IS_ALIGNED((unsigned long)dst, 4)) {
+               ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4);
+               i = 4 - (dst - ptr);
+               val = readl_relaxed(ptr);
+               memcpy((u8 *)&val + (4 - i), src, i);
+               writel_relaxed(val, ptr);
+       }
+
+       __iowrite32_copy(dst + i, src + i, (len - i) / 4);
+       remain = (len - i) % 4;
+
+       if (remain > 0) {
+               val = readl_relaxed(dst + len - remain);
+               memcpy(&val, src + len - remain, remain);
+               writel_relaxed(val, dst + len - remain);
+       }
+}
+EXPORT_SYMBOL_GPL(scp_memcpy_aligned);
+
+/**
+ * scp_ipi_lock() - Lock before operations of an IPI ID
+ *
+ * @scp:       mtk_scp structure
+ * @id:                IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_lock(struct mtk_scp *scp, u32 id)
+{
+       if (WARN_ON(id >= SCP_IPI_MAX))
+               return;
+       mutex_lock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_lock);
+
+/**
+ * scp_ipi_unlock() - Unlock after operations of an IPI ID
+ *
+ * @scp:       mtk_scp structure
+ * @id:                IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id)
+{
+       if (WARN_ON(id >= SCP_IPI_MAX))
+               return;
+       mutex_unlock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unlock);
+
+/**
+ * scp_ipi_send() - send data from AP to scp.
+ *
+ * @scp:       mtk_scp structure
+ * @id:                IPI ID
+ * @buf:       the data buffer
+ * @len:       the data buffer length
+ * @wait:      number of msecs to wait for ack. 0 to skip waiting.
+ *
+ * This function is thread-safe. When this function returns,
+ * the SCP has received the data and has started processing it.
+ * When the processing completes, the IPI handler registered
+ * by scp_ipi_register() will be called in interrupt context.
+ *
+ * Return: 0 if the data is sent successfully, a negative errno on error.
+ **/
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+                unsigned int wait)
+{
+       struct mtk_share_obj __iomem *send_obj = scp->send_buf;
+       unsigned long timeout;
+       int ret;
+
+       if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
+           WARN_ON(id == SCP_IPI_NS_SERVICE) ||
+           WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+               return -EINVAL;
+
+       mutex_lock(&scp->send_lock);
+
+       ret = clk_prepare_enable(scp->clk);
+       if (ret) {
+               dev_err(scp->dev, "failed to enable clock\n");
+               goto unlock_mutex;
+       }
+
+       /* Wait until SCP receives the last command */
+       timeout = jiffies + msecs_to_jiffies(2000);
+       do {
+               if (time_after(jiffies, timeout)) {
+                       dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
+                       ret = -ETIMEDOUT;
+                       goto clock_disable;
+               }
+       } while (readl(scp->reg_base + MT8183_HOST_TO_SCP));
+
+       scp_memcpy_aligned(send_obj->share_buf, buf, len);
+
+       writel(len, &send_obj->len);
+       writel(id, &send_obj->id);
+
+       scp->ipi_id_ack[id] = false;
+       /* send the command to SCP */
+       writel(MT8183_HOST_IPC_INT_BIT, scp->reg_base + MT8183_HOST_TO_SCP);
+
+       if (wait) {
+               /* wait for SCP's ACK */
+               timeout = msecs_to_jiffies(wait);
+               ret = wait_event_timeout(scp->ack_wq,
+                                        scp->ipi_id_ack[id],
+                                        timeout);
+               scp->ipi_id_ack[id] = false;
+               if (WARN(!ret, "scp ipi %d ack timed out!", id))
+                       ret = -EIO;
+               else
+                       ret = 0;
+       }
+
+clock_disable:
+       clk_disable_unprepare(scp->clk);
+unlock_mutex:
+       mutex_unlock(&scp->send_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_send);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek scp IPI interface");
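
As documented above, scp_ipi_send() waits for the SCP to pick up the previous command and can optionally wait for the SCP's ack. A hedged usage sketch of the IPI API follows; the IPI id, the message payload and the 100 ms ack timeout are assumptions chosen for illustration only.

/* Hypothetical IPI usage sketch; ipi_id and the payload are illustrative. */
#include <linux/kernel.h>
#include <linux/remoteproc/mtk_scp.h>

/* Called in interrupt context whenever the SCP sends data on this id */
static void example_ipi_handler(void *data, unsigned int len, void *priv)
{
        pr_info("received %u bytes from SCP\n", len);
}

static int example_talk_to_scp(struct mtk_scp *scp, u32 ipi_id)
{
        u8 msg[8] = { 0x01 };   /* made-up payload */
        int ret;

        ret = scp_ipi_register(scp, ipi_id, example_ipi_handler, NULL);
        if (ret)
                return ret;

        /* Send the payload and wait up to 100 ms for the SCP's ack */
        ret = scp_ipi_send(scp, ipi_id, msg, sizeof(msg), 100);

        scp_ipi_unregister(scp, ipi_id);
        return ret;
}
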
index 471128a..a1cc9cb 100644 (file)
 #define AXI_HALTREQ_REG                        0x0
 #define AXI_HALTACK_REG                        0x4
 #define AXI_IDLE_REG                   0x8
+#define NAV_AXI_HALTREQ_BIT            BIT(0)
+#define NAV_AXI_HALTACK_BIT            BIT(1)
+#define NAV_AXI_IDLE_BIT               BIT(2)
+#define AXI_GATING_VALID_OVERRIDE      BIT(0)
 
-#define HALT_ACK_TIMEOUT_MS            100
+#define HALT_ACK_TIMEOUT_US            100000
+#define NAV_HALT_ACK_TIMEOUT_US                200
 
 /* QDSP6SS_RESET */
 #define Q6SS_STOP_CORE                 BIT(0)
 #define Q6SS_CORE_ARES                 BIT(1)
 #define Q6SS_BUS_ARES_ENABLE           BIT(2)
 
+/* QDSP6SS CBCR */
+#define Q6SS_CBCR_CLKEN                        BIT(0)
+#define Q6SS_CBCR_CLKOFF               BIT(31)
+#define Q6SS_CBCR_TIMEOUT_US           200
+
 /* QDSP6SS_GFMUX_CTL */
 #define Q6SS_CLK_ENABLE                        BIT(1)
 
 #define QDSP6v56_BHS_ON                BIT(24)
 #define QDSP6v56_CLAMP_WL              BIT(21)
 #define QDSP6v56_CLAMP_QMC_MEM         BIT(22)
-#define HALT_CHECK_MAX_LOOPS           200
 #define QDSP6SS_XO_CBCR                0x0038
 #define QDSP6SS_ACC_OVERRIDE_VAL               0x20
 
 /* QDSP6v65 parameters */
+#define QDSP6SS_CORE_CBCR              0x20
 #define QDSP6SS_SLEEP                   0x3C
 #define QDSP6SS_BOOT_CORE_START         0x400
 #define QDSP6SS_BOOT_CMD                0x404
-#define SLEEP_CHECK_MAX_LOOPS           200
+#define QDSP6SS_BOOT_STATUS            0x408
+#define BOOT_STATUS_TIMEOUT_US         200
 #define BOOT_FSM_TIMEOUT                10000
 
 struct reg_info {
@@ -131,6 +142,7 @@ struct rproc_hexagon_res {
        int version;
        bool need_mem_protection;
        bool has_alt_reset;
+       bool has_halt_nav;
 };
 
 struct q6v5 {
@@ -141,9 +153,14 @@ struct q6v5 {
        void __iomem *rmb_base;
 
        struct regmap *halt_map;
+       struct regmap *halt_nav_map;
+       struct regmap *conn_map;
+
        u32 halt_q6;
        u32 halt_modem;
        u32 halt_nc;
+       u32 halt_nav;
+       u32 conn_box;
 
        struct reset_control *mss_restart;
        struct reset_control *pdc_reset;
@@ -187,6 +204,7 @@ struct q6v5 {
        struct qcom_sysmon *sysmon;
        bool need_mem_protection;
        bool has_alt_reset;
+       bool has_halt_nav;
        int mpss_perm;
        int mba_perm;
        const char *hexagon_mdt_image;
@@ -198,6 +216,7 @@ enum {
        MSS_MSM8974,
        MSS_MSM8996,
        MSS_MSM8998,
+       MSS_SC7180,
        MSS_SDM845,
 };
 
@@ -396,6 +415,26 @@ static int q6v5_reset_assert(struct q6v5 *qproc)
                reset_control_assert(qproc->pdc_reset);
                ret = reset_control_reset(qproc->mss_restart);
                reset_control_deassert(qproc->pdc_reset);
+       } else if (qproc->has_halt_nav) {
+               /*
+                * When the AXI pipeline is being reset with the Q6 modem partly
+                * operational there is possibility of AXI valid signal to
+                * glitch, leading to spurious transactions and Q6 hangs. A work
+                * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
+                * BIT before triggering Q6 MSS reset. Both the HALTREQ and
+                * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert
+                * followed by a MSS deassert, while holding the PDC reset.
+                */
+               reset_control_assert(qproc->pdc_reset);
+               regmap_update_bits(qproc->conn_map, qproc->conn_box,
+                                  AXI_GATING_VALID_OVERRIDE, 1);
+               regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
+                                  NAV_AXI_HALTREQ_BIT, 0);
+               reset_control_assert(qproc->mss_restart);
+               reset_control_deassert(qproc->pdc_reset);
+               regmap_update_bits(qproc->conn_map, qproc->conn_box,
+                                  AXI_GATING_VALID_OVERRIDE, 0);
+               ret = reset_control_deassert(qproc->mss_restart);
        } else {
                ret = reset_control_assert(qproc->mss_restart);
        }
@@ -413,6 +452,8 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
                ret = reset_control_reset(qproc->mss_restart);
                writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
                reset_control_deassert(qproc->pdc_reset);
+       } else if (qproc->has_halt_nav) {
+               ret = reset_control_reset(qproc->mss_restart);
        } else {
                ret = reset_control_deassert(qproc->mss_restart);
        }
@@ -474,12 +515,12 @@ static int q6v5proc_reset(struct q6v5 *qproc)
 
        if (qproc->version == MSS_SDM845) {
                val = readl(qproc->reg_base + QDSP6SS_SLEEP);
-               val |= 0x1;
+               val |= Q6SS_CBCR_CLKEN;
                writel(val, qproc->reg_base + QDSP6SS_SLEEP);
 
                ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
-                                        val, !(val & BIT(31)), 1,
-                                        SLEEP_CHECK_MAX_LOOPS);
+                                        val, !(val & Q6SS_CBCR_CLKOFF), 1,
+                                        Q6SS_CBCR_TIMEOUT_US);
                if (ret) {
                        dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
                        return -ETIMEDOUT;
@@ -499,6 +540,54 @@ static int q6v5proc_reset(struct q6v5 *qproc)
                        return ret;
                }
 
+               goto pbl_wait;
+       } else if (qproc->version == MSS_SC7180) {
+               val = readl(qproc->reg_base + QDSP6SS_SLEEP);
+               val |= Q6SS_CBCR_CLKEN;
+               writel(val, qproc->reg_base + QDSP6SS_SLEEP);
+
+               ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
+                                        val, !(val & Q6SS_CBCR_CLKOFF), 1,
+                                        Q6SS_CBCR_TIMEOUT_US);
+               if (ret) {
+                       dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
+                       return -ETIMEDOUT;
+               }
+
+               /* Turn on the XO clock needed for PLL setup */
+               val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+               val |= Q6SS_CBCR_CLKEN;
+               writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+               ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+                                        val, !(val & Q6SS_CBCR_CLKOFF), 1,
+                                        Q6SS_CBCR_TIMEOUT_US);
+               if (ret) {
+                       dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
+                       return -ETIMEDOUT;
+               }
+
+               /* Configure Q6 core CBCR to auto-enable after reset sequence */
+               val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
+               val |= Q6SS_CBCR_CLKEN;
+               writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
+
+               /* De-assert the Q6 stop core signal */
+               writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+
+               /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
+               writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
+
+               /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
+               ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
+                                        val, (val & BIT(0)) != 0, 1,
+                                        BOOT_STATUS_TIMEOUT_US);
+               if (ret) {
+                       dev_err(qproc->dev, "Boot FSM failed to complete.\n");
+                       /* Reset the modem so that boot FSM is in reset state */
+                       q6v5_reset_deassert(qproc);
+                       return ret;
+               }
                goto pbl_wait;
        } else if (qproc->version == MSS_MSM8996 ||
                   qproc->version == MSS_MSM8998) {
@@ -515,13 +604,13 @@ static int q6v5proc_reset(struct q6v5 *qproc)
 
                /* BHS require xo cbcr to be enabled */
                val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
-               val |= 0x1;
+               val |= Q6SS_CBCR_CLKEN;
                writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
 
                /* Read CLKOFF bit to go low indicating CLK is enabled */
                ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
-                                        val, !(val & BIT(31)), 1,
-                                        HALT_CHECK_MAX_LOOPS);
+                                        val, !(val & Q6SS_CBCR_CLKOFF), 1,
+                                        Q6SS_CBCR_TIMEOUT_US);
                if (ret) {
                        dev_err(qproc->dev,
                                "xo cbcr enabling timed out (rc:%d)\n", ret);
@@ -637,7 +726,6 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
                                   struct regmap *halt_map,
                                   u32 offset)
 {
-       unsigned long timeout;
        unsigned int val;
        int ret;
 
@@ -650,14 +738,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
        regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
 
        /* Wait for halt */
-       timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
-       for (;;) {
-               ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
-               if (ret || val || time_after(jiffies, timeout))
-                       break;
-
-               msleep(1);
-       }
+       regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
+                                val, 1000, HALT_ACK_TIMEOUT_US);
 
        ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
        if (ret || !val)
@@ -667,6 +749,32 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
        regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
 }
 
+static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
+                                      struct regmap *halt_map,
+                                      u32 offset)
+{
+       unsigned int val;
+       int ret;
+
+       /* Check if we're already idle */
+       ret = regmap_read(halt_map, offset, &val);
+       if (!ret && (val & NAV_AXI_IDLE_BIT))
+               return;
+
+       /* Assert halt request */
+       regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
+                          NAV_AXI_HALTREQ_BIT);
+
+       /* Wait for halt ack */
+       regmap_read_poll_timeout(halt_map, offset, val,
+                                (val & NAV_AXI_HALTACK_BIT),
+                                5, NAV_HALT_ACK_TIMEOUT_US);
+
+       ret = regmap_read(halt_map, offset, &val);
+       if (ret || !(val & NAV_AXI_IDLE_BIT))
+               dev_err(qproc->dev, "port failed halt\n");
+}
+
 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
 {
        unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
@@ -829,6 +937,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
 halt_axi_ports:
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+       if (qproc->has_halt_nav)
+               q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
+                                          qproc->halt_nav);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
 
 reclaim_mba:
@@ -876,6 +987,9 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
 
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+       if (qproc->has_halt_nav)
+               q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
+                                          qproc->halt_nav);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
        if (qproc->version == MSS_MSM8996) {
                /*
@@ -1253,6 +1367,47 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
        qproc->halt_modem = args.args[1];
        qproc->halt_nc = args.args[2];
 
+       if (qproc->has_halt_nav) {
+               struct platform_device *nav_pdev;
+
+               ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+                                                      "qcom,halt-nav-regs",
+                                                      1, 0, &args);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
+                       return -EINVAL;
+               }
+
+               nav_pdev = of_find_device_by_node(args.np);
+               of_node_put(args.np);
+               if (!nav_pdev) {
+                       dev_err(&pdev->dev, "failed to get mss clock device\n");
+                       return -EPROBE_DEFER;
+               }
+
+               qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
+               if (!qproc->halt_nav_map) {
+                       dev_err(&pdev->dev, "failed to get map from device\n");
+                       return -EINVAL;
+               }
+               qproc->halt_nav = args.args[0];
+
+               ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+                                                      "qcom,halt-nav-regs",
+                                                      1, 1, &args);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
+                       return -EINVAL;
+               }
+
+               qproc->conn_map = syscon_node_to_regmap(args.np);
+               of_node_put(args.np);
+               if (IS_ERR(qproc->conn_map))
+                       return PTR_ERR(qproc->conn_map);
+
+               qproc->conn_box = args.args[0];
+       }
+
        return 0;
 }
 
@@ -1327,7 +1482,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
                return PTR_ERR(qproc->mss_restart);
        }
 
-       if (qproc->has_alt_reset) {
+       if (qproc->has_alt_reset || qproc->has_halt_nav) {
                qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
                                                                    "pdc_reset");
                if (IS_ERR(qproc->pdc_reset)) {
@@ -1426,6 +1581,7 @@ static int q6v5_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, qproc);
 
+       qproc->has_halt_nav = desc->has_halt_nav;
        ret = q6v5_init_mem(qproc, pdev);
        if (ret)
                goto free_rproc;
@@ -1549,6 +1705,41 @@ static int q6v5_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct rproc_hexagon_res sc7180_mss = {
+       .hexagon_mba_image = "mba.mbn",
+       .proxy_clk_names = (char*[]){
+               "xo",
+               NULL
+       },
+       .reset_clk_names = (char*[]){
+               "iface",
+               "bus",
+               "snoc_axi",
+               NULL
+       },
+       .active_clk_names = (char*[]){
+               "mnoc_axi",
+               "nav",
+               "mss_nav",
+               "mss_crypto",
+               NULL
+       },
+       .active_pd_names = (char*[]){
+               "load_state",
+               NULL
+       },
+       .proxy_pd_names = (char*[]){
+               "cx",
+               "mx",
+               "mss",
+               NULL
+       },
+       .need_mem_protection = true,
+       .has_alt_reset = false,
+       .has_halt_nav = true,
+       .version = MSS_SC7180,
+};
+
 static const struct rproc_hexagon_res sdm845_mss = {
        .hexagon_mba_image = "mba.mbn",
        .proxy_clk_names = (char*[]){
@@ -1580,6 +1771,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
        },
        .need_mem_protection = true,
        .has_alt_reset = true,
+       .has_halt_nav = false,
        .version = MSS_SDM845,
 };
 
@@ -1594,7 +1786,6 @@ static const struct rproc_hexagon_res msm8998_mss = {
        .active_clk_names = (char*[]){
                        "iface",
                        "bus",
-                       "mem",
                        "gpll0_mss",
                        "mnoc_axi",
                        "snoc_axi",
@@ -1607,6 +1798,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
        },
        .need_mem_protection = true,
        .has_alt_reset = false,
+       .has_halt_nav = false,
        .version = MSS_MSM8998,
 };
 
@@ -1636,6 +1828,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
        },
        .need_mem_protection = true,
        .has_alt_reset = false,
+       .has_halt_nav = false,
        .version = MSS_MSM8996,
 };
 
@@ -1668,6 +1861,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
        },
        .need_mem_protection = false,
        .has_alt_reset = false,
+       .has_halt_nav = false,
        .version = MSS_MSM8916,
 };
 
@@ -1708,6 +1902,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
        },
        .need_mem_protection = false,
        .has_alt_reset = false,
+       .has_halt_nav = false,
        .version = MSS_MSM8974,
 };
 
@@ -1717,6 +1912,7 @@ static const struct of_device_id q6v5_of_match[] = {
        { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
        { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
        { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
+       { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
        { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
        { },
 };
index db4b3c4..edf9d0e 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 #include <linux/qcom_scm.h>
 #include <linux/regulator/consumer.h>
 #include <linux/remoteproc.h>
@@ -31,6 +33,10 @@ struct adsp_data {
        const char *firmware_name;
        int pas_id;
        bool has_aggre2_clk;
+       bool auto_boot;
+
+       char **active_pd_names;
+       char **proxy_pd_names;
 
        const char *ssr_name;
        const char *sysmon_name;
@@ -49,6 +55,12 @@ struct qcom_adsp {
        struct regulator *cx_supply;
        struct regulator *px_supply;
 
+       struct device *active_pds[1];
+       struct device *proxy_pds[3];
+
+       int active_pd_count;
+       int proxy_pd_count;
+
        int pas_id;
        int crash_reason_smem;
        bool has_aggre2_clk;
@@ -67,6 +79,41 @@ struct qcom_adsp {
        struct qcom_sysmon *sysmon;
 };
 
+static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+                          size_t pd_count)
+{
+       int ret;
+       int i;
+
+       for (i = 0; i < pd_count; i++) {
+               dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+               ret = pm_runtime_get_sync(pds[i]);
+               if (ret < 0)
+                       goto unroll_pd_votes;
+       }
+
+       return 0;
+
+unroll_pd_votes:
+       for (i--; i >= 0; i--) {
+               dev_pm_genpd_set_performance_state(pds[i], 0);
+               pm_runtime_put(pds[i]);
+       }
+
+       return ret;
+}
+
+static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+                            size_t pd_count)
+{
+       int i;
+
+       for (i = 0; i < pd_count; i++) {
+               dev_pm_genpd_set_performance_state(pds[i], 0);
+               pm_runtime_put(pds[i]);
+       }
+}
+
 static int adsp_load(struct rproc *rproc, const struct firmware *fw)
 {
        struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -84,9 +131,17 @@ static int adsp_start(struct rproc *rproc)
 
        qcom_q6v5_prepare(&adsp->q6v5);
 
+       ret = adsp_pds_enable(adsp, adsp->active_pds, adsp->active_pd_count);
+       if (ret < 0)
+               goto disable_irqs;
+
+       ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+       if (ret < 0)
+               goto disable_active_pds;
+
        ret = clk_prepare_enable(adsp->xo);
        if (ret)
-               return ret;
+               goto disable_proxy_pds;
 
        ret = clk_prepare_enable(adsp->aggre2_clk);
        if (ret)
@@ -124,6 +179,12 @@ disable_aggre2_clk:
        clk_disable_unprepare(adsp->aggre2_clk);
 disable_xo_clk:
        clk_disable_unprepare(adsp->xo);
+disable_proxy_pds:
+       adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+disable_active_pds:
+       adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
+disable_irqs:
+       qcom_q6v5_unprepare(&adsp->q6v5);
 
        return ret;
 }
@@ -136,6 +197,7 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
        regulator_disable(adsp->cx_supply);
        clk_disable_unprepare(adsp->aggre2_clk);
        clk_disable_unprepare(adsp->xo);
+       adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
 }
 
 static int adsp_stop(struct rproc *rproc)
@@ -152,6 +214,7 @@ static int adsp_stop(struct rproc *rproc)
        if (ret)
                dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
 
+       adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
        handover = qcom_q6v5_unprepare(&adsp->q6v5);
        if (handover)
                qcom_pas_handover(&adsp->q6v5);
@@ -217,6 +280,59 @@ static int adsp_init_regulator(struct qcom_adsp *adsp)
        return PTR_ERR_OR_ZERO(adsp->px_supply);
 }
 
+static int adsp_pds_attach(struct device *dev, struct device **devs,
+                          char **pd_names)
+{
+       size_t num_pds = 0;
+       int ret;
+       int i;
+
+       if (!pd_names)
+               return 0;
+
+       /* Handle single power domain */
+       if (dev->pm_domain) {
+               devs[0] = dev;
+               pm_runtime_enable(dev);
+               return 1;
+       }
+
+       while (pd_names[num_pds])
+               num_pds++;
+
+       for (i = 0; i < num_pds; i++) {
+               devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+               if (IS_ERR_OR_NULL(devs[i])) {
+                       ret = PTR_ERR(devs[i]) ? : -ENODATA;
+                       goto unroll_attach;
+               }
+       }
+
+       return num_pds;
+
+unroll_attach:
+       for (i--; i >= 0; i--)
+               dev_pm_domain_detach(devs[i], false);
+
+       return ret;
+}
+
+static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+                           size_t pd_count)
+{
+       struct device *dev = adsp->dev;
+       int i;
+
+       /* Handle single power domain */
+       if (dev->pm_domain && pd_count) {
+               pm_runtime_disable(dev);
+               return;
+       }
+
+       for (i = 0; i < pd_count; i++)
+               dev_pm_domain_detach(pds[i], false);
+}
+
 static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
 {
        struct device_node *node;
@@ -273,6 +389,8 @@ static int adsp_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       rproc->auto_boot = desc->auto_boot;
+
        adsp = (struct qcom_adsp *)rproc->priv;
        adsp->dev = &pdev->dev;
        adsp->rproc = rproc;
@@ -292,10 +410,22 @@ static int adsp_probe(struct platform_device *pdev)
        if (ret)
                goto free_rproc;
 
+       ret = adsp_pds_attach(&pdev->dev, adsp->active_pds,
+                             desc->active_pd_names);
+       if (ret < 0)
+               goto free_rproc;
+       adsp->active_pd_count = ret;
+
+       ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
+                             desc->proxy_pd_names);
+       if (ret < 0)
+               goto detach_active_pds;
+       adsp->proxy_pd_count = ret;
+
        ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
                             qcom_pas_handover);
        if (ret)
-               goto free_rproc;
+               goto detach_proxy_pds;
 
        qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
        qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
@@ -305,15 +435,19 @@ static int adsp_probe(struct platform_device *pdev)
                                              desc->ssctl_id);
        if (IS_ERR(adsp->sysmon)) {
                ret = PTR_ERR(adsp->sysmon);
-               goto free_rproc;
+               goto detach_proxy_pds;
        }
 
        ret = rproc_add(rproc);
        if (ret)
-               goto free_rproc;
+               goto detach_proxy_pds;
 
        return 0;
 
+detach_proxy_pds:
+       adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+detach_active_pds:
+       adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count);
 free_rproc:
        rproc_free(rproc);
 
@@ -340,6 +474,41 @@ static const struct adsp_data adsp_resource_init = {
                .firmware_name = "adsp.mdt",
                .pas_id = 1,
                .has_aggre2_clk = false,
+               .auto_boot = true,
+               .ssr_name = "lpass",
+               .sysmon_name = "adsp",
+               .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8150_adsp_resource = {
+               .crash_reason_smem = 423,
+               .firmware_name = "adsp.mdt",
+               .pas_id = 1,
+               .has_aggre2_clk = false,
+               .auto_boot = true,
+               .active_pd_names = (char*[]){
+                       "load_state",
+                       NULL
+               },
+               .proxy_pd_names = (char*[]){
+                       "cx",
+                       NULL
+               },
+               .ssr_name = "lpass",
+               .sysmon_name = "adsp",
+               .ssctl_id = 0x14,
+};
+
+static const struct adsp_data msm8998_adsp_resource = {
+               .crash_reason_smem = 423,
+               .firmware_name = "adsp.mdt",
+               .pas_id = 1,
+               .has_aggre2_clk = false,
+               .auto_boot = true,
+               .proxy_pd_names = (char*[]){
+                       "cx",
+                       NULL
+               },
                .ssr_name = "lpass",
                .sysmon_name = "adsp",
                .ssctl_id = 0x14,
@@ -350,16 +519,92 @@ static const struct adsp_data cdsp_resource_init = {
        .firmware_name = "cdsp.mdt",
        .pas_id = 18,
        .has_aggre2_clk = false,
+       .auto_boot = true,
+       .ssr_name = "cdsp",
+       .sysmon_name = "cdsp",
+       .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm8150_cdsp_resource = {
+       .crash_reason_smem = 601,
+       .firmware_name = "cdsp.mdt",
+       .pas_id = 18,
+       .has_aggre2_clk = false,
+       .auto_boot = true,
+       .active_pd_names = (char*[]){
+               "load_state",
+               NULL
+       },
+       .proxy_pd_names = (char*[]){
+               "cx",
+               NULL
+       },
        .ssr_name = "cdsp",
        .sysmon_name = "cdsp",
        .ssctl_id = 0x17,
 };
 
+static const struct adsp_data mpss_resource_init = {
+       .crash_reason_smem = 421,
+       .firmware_name = "modem.mdt",
+       .pas_id = 4,
+       .has_aggre2_clk = false,
+       .auto_boot = false,
+       .active_pd_names = (char*[]){
+               "load_state",
+               NULL
+       },
+       .proxy_pd_names = (char*[]){
+               "cx",
+               "mss",
+               NULL
+       },
+       .ssr_name = "mpss",
+       .sysmon_name = "modem",
+       .ssctl_id = 0x12,
+};
+
 static const struct adsp_data slpi_resource_init = {
                .crash_reason_smem = 424,
                .firmware_name = "slpi.mdt",
                .pas_id = 12,
                .has_aggre2_clk = true,
+               .auto_boot = true,
+               .ssr_name = "dsps",
+               .sysmon_name = "slpi",
+               .ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8150_slpi_resource = {
+               .crash_reason_smem = 424,
+               .firmware_name = "slpi.mdt",
+               .pas_id = 12,
+               .has_aggre2_clk = false,
+               .auto_boot = true,
+               .active_pd_names = (char*[]){
+                       "load_state",
+                       NULL
+               },
+               .proxy_pd_names = (char*[]){
+                       "lcx",
+                       "lmx",
+                       NULL
+               },
+               .ssr_name = "dsps",
+               .sysmon_name = "slpi",
+               .ssctl_id = 0x16,
+};
+
+static const struct adsp_data msm8998_slpi_resource = {
+               .crash_reason_smem = 424,
+               .firmware_name = "slpi.mdt",
+               .pas_id = 12,
+               .has_aggre2_clk = true,
+               .auto_boot = true,
+               .proxy_pd_names = (char*[]){
+                       "ssc_cx",
+                       NULL
+               },
                .ssr_name = "dsps",
                .sysmon_name = "slpi",
                .ssctl_id = 0x16,
@@ -369,6 +614,7 @@ static const struct adsp_data wcss_resource_init = {
        .crash_reason_smem = 421,
        .firmware_name = "wcnss.mdt",
        .pas_id = 6,
+       .auto_boot = true,
        .ssr_name = "mpss",
        .sysmon_name = "wcnss",
        .ssctl_id = 0x12,
@@ -378,11 +624,17 @@ static const struct of_device_id adsp_of_match[] = {
        { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
        { .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
        { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+       { .compatible = "qcom,msm8998-adsp-pas", .data = &msm8998_adsp_resource},
+       { .compatible = "qcom,msm8998-slpi-pas", .data = &msm8998_slpi_resource},
        { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
        { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
        { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
        { .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
        { .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
+       { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
+       { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
+       { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
+       { .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
        { },
 };
 MODULE_DEVICE_TABLE(of, adsp_of_match);
index c231314..faf3822 100644 (file)
@@ -394,7 +394,7 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
                break;
        default:
                return -EINVAL;
-       };
+       }
 
        sysmon->ssctl_version = svc->version;
 
index 307df98..097f33e 100644 (file)
@@ -477,8 +477,8 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
        char name[16];
 
        /* make sure resource isn't truncated */
-       if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
-                       + rsc->config_len > avail) {
+       if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
+                       avail) {
                dev_err(dev, "vdev rsc is truncated\n");
                return -EINVAL;
        }
@@ -2223,7 +2223,7 @@ static int __init remoteproc_init(void)
 
        return 0;
 }
-module_init(remoteproc_init);
+subsys_initcall(remoteproc_init);
 
 static void __exit remoteproc_exit(void)
 {
index 7092765..a9108ff 100644 (file)
@@ -15,6 +15,15 @@ config RPMSG_CHAR
          in /dev. They make it possible for user-space programs to send and
          receive rpmsg packets.
 
+config RPMSG_MTK_SCP
+       tristate "MediaTek SCP"
+       depends on MTK_SCP
+       select RPMSG
+       help
+         Say y here to enable support for providing communication channels
+         to remote processors on MediaTek platforms.
+         This uses IPI and IPC to communicate with the remote processors.
+
 config RPMSG_QCOM_GLINK_NATIVE
        tristate
        select RPMSG
index 9aa8595..ae92a7f 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_RPMSG)            += rpmsg_core.o
 obj-$(CONFIG_RPMSG_CHAR)       += rpmsg_char.o
+obj-$(CONFIG_RPMSG_MTK_SCP)    += mtk_rpmsg.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
new file mode 100644 (file)
index 0000000..232aa4e
--- /dev/null
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2019 Google LLC.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+#include <linux/workqueue.h>
+
+#include "rpmsg_internal.h"
+
+struct mtk_rpmsg_rproc_subdev {
+       struct platform_device *pdev;
+       struct mtk_rpmsg_info *info;
+       struct rpmsg_endpoint *ns_ept;
+       struct rproc_subdev subdev;
+
+       struct work_struct register_work;
+       struct list_head channels;
+       struct mutex channels_lock;
+};
+
+#define to_mtk_subdev(d) container_of(d, struct mtk_rpmsg_rproc_subdev, subdev)
+
+struct mtk_rpmsg_channel_info {
+       struct rpmsg_channel_info info;
+       bool registered;
+       struct list_head list;
+};
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ *
+ * This message is sent across to publish a new service. When we receive these
+ * messages, an appropriate rpmsg channel (i.e. device) is created. In turn, the
+ * ->probe() handler of the appropriate rpmsg driver will be invoked
+ *  (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+       char name[RPMSG_NAME_SIZE];
+       u32 addr;
+} __packed;
+
+struct mtk_rpmsg_device {
+       struct rpmsg_device rpdev;
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+struct mtk_rpmsg_endpoint {
+       struct rpmsg_endpoint ept;
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+#define to_mtk_rpmsg_device(r) container_of(r, struct mtk_rpmsg_device, rpdev)
+#define to_mtk_rpmsg_endpoint(r) container_of(r, struct mtk_rpmsg_endpoint, ept)
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops;
+
+static void __mtk_ept_release(struct kref *kref)
+{
+       struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+                                                 refcount);
+       kfree(to_mtk_rpmsg_endpoint(ept));
+}
+
+static void mtk_rpmsg_ipi_handler(void *data, unsigned int len, void *priv)
+{
+       struct mtk_rpmsg_endpoint *mept = priv;
+       struct rpmsg_endpoint *ept = &mept->ept;
+       int ret;
+
+       ret = (*ept->cb)(ept->rpdev, data, len, ept->priv, ept->addr);
+       if (ret)
+               dev_warn(&ept->rpdev->dev, "rpmsg handler returned error %d",
+                        ret);
+}
+
+static struct rpmsg_endpoint *
+__mtk_create_ept(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+                struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+                u32 id)
+{
+       struct mtk_rpmsg_endpoint *mept;
+       struct rpmsg_endpoint *ept;
+       struct platform_device *pdev = mtk_subdev->pdev;
+       int ret;
+
+       mept = kzalloc(sizeof(*mept), GFP_KERNEL);
+       if (!mept)
+               return NULL;
+       mept->mtk_subdev = mtk_subdev;
+
+       ept = &mept->ept;
+       kref_init(&ept->refcount);
+
+       ept->rpdev = rpdev;
+       ept->cb = cb;
+       ept->priv = priv;
+       ept->ops = &mtk_rpmsg_endpoint_ops;
+       ept->addr = id;
+
+       ret = mtk_subdev->info->register_ipi(pdev, id, mtk_rpmsg_ipi_handler,
+                                            mept);
+       if (ret) {
+               dev_err(&pdev->dev, "IPI register failed, id = %d", id);
+               kref_put(&ept->refcount, __mtk_ept_release);
+               return NULL;
+       }
+
+       return ept;
+}
+
+static struct rpmsg_endpoint *
+mtk_rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+                    struct rpmsg_channel_info chinfo)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+               to_mtk_rpmsg_device(rpdev)->mtk_subdev;
+
+       return __mtk_create_ept(mtk_subdev, rpdev, cb, priv, chinfo.src);
+}
+
+static void mtk_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+               to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+       mtk_subdev->info->unregister_ipi(mtk_subdev->pdev, ept->addr);
+       kref_put(&ept->refcount, __mtk_ept_release);
+}
+
+static int mtk_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+               to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+       return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+                                         len, 0);
+}
+
+static int mtk_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+               to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+       /*
+        * TODO: This is currently the same as mtk_rpmsg_send() and waits
+        * until the SCP has received the last command.
+        */
+       return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+                                         len, 0);
+}
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops = {
+       .destroy_ept = mtk_rpmsg_destroy_ept,
+       .send = mtk_rpmsg_send,
+       .trysend = mtk_rpmsg_trysend,
+};
+
+static void mtk_rpmsg_release_device(struct device *dev)
+{
+       struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+       struct mtk_rpmsg_device *mdev = to_mtk_rpmsg_device(rpdev);
+
+       kfree(mdev);
+}
+
+static const struct rpmsg_device_ops mtk_rpmsg_device_ops = {
+       .create_ept = mtk_rpmsg_create_ept,
+};
+
+static struct device_node *
+mtk_rpmsg_match_device_subnode(struct device_node *node, const char *channel)
+{
+       struct device_node *child;
+       const char *name;
+       int ret;
+
+       for_each_available_child_of_node(node, child) {
+               ret = of_property_read_string(child, "mtk,rpmsg-name", &name);
+               if (ret)
+                       continue;
+
+               if (strcmp(name, channel) == 0)
+                       return child;
+       }
+
+       return NULL;
+}
+
+static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+                                    struct rpmsg_channel_info *info)
+{
+       struct rpmsg_device *rpdev;
+       struct mtk_rpmsg_device *mdev;
+       struct platform_device *pdev = mtk_subdev->pdev;
+       int ret;
+
+       mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+       if (!mdev)
+               return -ENOMEM;
+
+       mdev->mtk_subdev = mtk_subdev;
+
+       rpdev = &mdev->rpdev;
+       rpdev->ops = &mtk_rpmsg_device_ops;
+       rpdev->src = info->src;
+       rpdev->dst = info->dst;
+       strscpy(rpdev->id.name, info->name, RPMSG_NAME_SIZE);
+
+       rpdev->dev.of_node =
+               mtk_rpmsg_match_device_subnode(pdev->dev.of_node, info->name);
+       rpdev->dev.parent = &pdev->dev;
+       rpdev->dev.release = mtk_rpmsg_release_device;
+
+       ret = rpmsg_register_device(rpdev);
+       if (ret) {
+               kfree(mdev);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void mtk_register_device_work_function(struct work_struct *register_work)
+{
+       struct mtk_rpmsg_rproc_subdev *subdev = container_of(
+               register_work, struct mtk_rpmsg_rproc_subdev, register_work);
+       struct platform_device *pdev = subdev->pdev;
+       struct mtk_rpmsg_channel_info *info;
+       int ret;
+
+       mutex_lock(&subdev->channels_lock);
+       list_for_each_entry(info, &subdev->channels, list) {
+               if (info->registered)
+                       continue;
+
+               ret = mtk_rpmsg_register_device(subdev, &info->info);
+               if (ret) {
+                       dev_err(&pdev->dev, "Can't create rpmsg_device\n");
+                       continue;
+               }
+
+               info->registered = true;
+       }
+       mutex_unlock(&subdev->channels_lock);
+}
+
+static int mtk_rpmsg_create_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+                                  char *name, u32 addr)
+{
+       struct mtk_rpmsg_channel_info *info;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       strscpy(info->info.name, name, RPMSG_NAME_SIZE);
+       info->info.src = addr;
+       info->info.dst = RPMSG_ADDR_ANY;
+       mutex_lock(&mtk_subdev->channels_lock);
+       list_add(&info->list, &mtk_subdev->channels);
+       mutex_unlock(&mtk_subdev->channels_lock);
+
+       schedule_work(&mtk_subdev->register_work);
+       return 0;
+}
+
+static int mtk_rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
+                          void *priv, u32 src)
+{
+       struct rpmsg_ns_msg *msg = data;
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev = priv;
+       struct device *dev = &mtk_subdev->pdev->dev;
+
+       int ret;
+
+       if (len != sizeof(*msg)) {
+               dev_err(dev, "malformed ns msg (%d)\n", len);
+               return -EINVAL;
+       }
+
+       /*
+        * the name service ept does _not_ belong to a real rpmsg channel,
+        * and is handled by the rpmsg bus itself.
+        * for sanity reasons, make sure a valid rpdev has _not_ sneaked
+        * in somehow.
+        */
+       if (rpdev) {
+               dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
+               return -EINVAL;
+       }
+
+       /* don't trust the remote processor for null terminating the name */
+       msg->name[RPMSG_NAME_SIZE - 1] = '\0';
+
+       dev_info(dev, "creating channel %s addr 0x%x\n", msg->name, msg->addr);
+
+       ret = mtk_rpmsg_create_device(mtk_subdev, msg->name, msg->addr);
+       if (ret) {
+               dev_err(dev, "create rpmsg device failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int mtk_rpmsg_prepare(struct rproc_subdev *subdev)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+       /* a dedicated endpoint handles the name service msgs */
+       if (mtk_subdev->info->ns_ipi_id >= 0) {
+               mtk_subdev->ns_ept =
+                       __mtk_create_ept(mtk_subdev, NULL, mtk_rpmsg_ns_cb,
+                                        mtk_subdev,
+                                        mtk_subdev->info->ns_ipi_id);
+               if (!mtk_subdev->ns_ept) {
+                       dev_err(&mtk_subdev->pdev->dev,
+                               "failed to create name service endpoint\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static void mtk_rpmsg_unprepare(struct rproc_subdev *subdev)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+       if (mtk_subdev->ns_ept) {
+               mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+               mtk_subdev->ns_ept = NULL;
+       }
+}
+
+static void mtk_rpmsg_stop(struct rproc_subdev *subdev, bool crashed)
+{
+       struct mtk_rpmsg_channel_info *info, *next;
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+       struct device *dev = &mtk_subdev->pdev->dev;
+
+       /*
+        * Destroy the name service endpoint here, to avoid new channels being
+        * created after the rpmsg_unregister_device loop below.
+        */
+       if (mtk_subdev->ns_ept) {
+               mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+               mtk_subdev->ns_ept = NULL;
+       }
+
+       cancel_work_sync(&mtk_subdev->register_work);
+
+       mutex_lock(&mtk_subdev->channels_lock);
+       list_for_each_entry(info, &mtk_subdev->channels, list) {
+               if (!info->registered)
+                       continue;
+               if (rpmsg_unregister_device(dev, &info->info)) {
+                       dev_warn(
+                               dev,
+                               "rpmsg_unregister_device failed for %s.%d.%d\n",
+                               info->info.name, info->info.src,
+                               info->info.dst);
+               }
+       }
+
+       list_for_each_entry_safe(info, next,
+                                &mtk_subdev->channels, list) {
+               list_del(&info->list);
+               kfree(info);
+       }
+       mutex_unlock(&mtk_subdev->channels_lock);
+}
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+                             struct mtk_rpmsg_info *info)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+
+       mtk_subdev = kzalloc(sizeof(*mtk_subdev), GFP_KERNEL);
+       if (!mtk_subdev)
+               return NULL;
+
+       mtk_subdev->pdev = pdev;
+       mtk_subdev->subdev.prepare = mtk_rpmsg_prepare;
+       mtk_subdev->subdev.stop = mtk_rpmsg_stop;
+       mtk_subdev->subdev.unprepare = mtk_rpmsg_unprepare;
+       mtk_subdev->info = info;
+       INIT_LIST_HEAD(&mtk_subdev->channels);
+       INIT_WORK(&mtk_subdev->register_work,
+                 mtk_register_device_work_function);
+       mutex_init(&mtk_subdev->channels_lock);
+
+       return &mtk_subdev->subdev;
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_create_rproc_subdev);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev)
+{
+       struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+       kfree(mtk_subdev);
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_destroy_rproc_subdev);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek scp rpmsg driver");
index df7a384..34c8b6c 100644 (file)
@@ -240,6 +240,7 @@ config RTC_DRV_AS3722
 
 config RTC_DRV_DS1307
        tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057"
+       select REGMAP_I2C
        help
          If you say yes here you get support for various compatible RTC
          chips (often with battery backup) connected with I2C. This driver
@@ -622,6 +623,7 @@ config RTC_DRV_RX8010
 
 config RTC_DRV_RX8581
        tristate "Epson RX-8571/RX-8581"
+       select REGMAP_I2C
        help
          If you say yes here you will get support for the Epson RX-8571/
          RX-8581.
@@ -649,6 +651,7 @@ config RTC_DRV_EM3027
 
 config RTC_DRV_RV3028
        tristate "Micro Crystal RV3028"
+       select REGMAP_I2C
        help
          If you say yes here you get support for the Micro Crystal
          RV3028.
@@ -677,13 +680,14 @@ config RTC_DRV_S5M
          will be called rtc-s5m.
 
 config RTC_DRV_SD3078
-    tristate "ZXW Shenzhen whwave SD3078"
-    help
-      If you say yes here you get support for the ZXW Shenzhen whwave
-      SD3078 RTC chips.
+       tristate "ZXW Shenzhen whwave SD3078"
+       select REGMAP_I2C
+       help
+         If you say yes here you get support for the ZXW Shenzhen whwave
+         SD3078 RTC chips.
 
-      This driver can also be built as a module. If so, the module
-      will be called rtc-sd3078
+         This driver can also be built as a module. If so, the module
+         will be called rtc-sd3078
 
 endif # I2C
 
@@ -849,14 +853,14 @@ config RTC_I2C_AND_SPI
        default m if I2C=m
        default y if I2C=y
        default y if SPI_MASTER=y
-       select REGMAP_I2C if I2C
-       select REGMAP_SPI if SPI_MASTER
 
 comment "SPI and I2C RTC drivers"
 
 config RTC_DRV_DS3232
        tristate "Dallas/Maxim DS3232/DS3234"
        depends on RTC_I2C_AND_SPI
+       select REGMAP_I2C if I2C
+       select REGMAP_SPI if SPI_MASTER
        help
          If you say yes here you get support for Dallas Semiconductor
          DS3232 and DS3234 real-time clock chips. If an interrupt is associated
@@ -876,6 +880,8 @@ config RTC_DRV_DS3232_HWMON
 config RTC_DRV_PCF2127
        tristate "NXP PCF2127"
        depends on RTC_I2C_AND_SPI
+       select REGMAP_I2C if I2C
+       select REGMAP_SPI if SPI_MASTER
        select WATCHDOG_CORE if WATCHDOG
        help
          If you say yes here you get support for the NXP PCF2127/29 RTC
@@ -892,6 +898,8 @@ config RTC_DRV_PCF2127
 config RTC_DRV_RV3029C2
        tristate "Micro Crystal RV3029/3049"
        depends on RTC_I2C_AND_SPI
+       select REGMAP_I2C if I2C
+       select REGMAP_SPI if SPI_MASTER
        help
          If you say yes here you get support for the Micro Crystal
          RV3029 and RV3049 RTC chips.
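
The Kconfig hunks above move the regmap selects from the RTC_I2C_AND_SPI umbrella symbol into the individual drivers that need them. The dependency comes from the driver code itself: a driver that calls devm_regmap_init_i2c() only builds when REGMAP_I2C is enabled, hence the per-driver "select REGMAP_I2C". A minimal sketch with hypothetical "ex_" names:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config ex_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

/* Hypothetical probe body: devm_regmap_init_i2c() is only available when
 * REGMAP_I2C is built, which is what the per-driver select guarantees. */
static int ex_rtc_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &ex_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	i2c_set_clientdata(client, map);

	return 0;
}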
index 7383067..3521d8e 100644 (file)
@@ -523,12 +523,9 @@ static int abx80x_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
                if (status < 0)
                        return status;
 
-               tmp = !!(status & ABX8XX_STATUS_BLF);
+               tmp = status & ABX8XX_STATUS_BLF ? RTC_VL_BACKUP_LOW : 0;
 
-               if (copy_to_user((void __user *)arg, &tmp, sizeof(int)))
-                       return -EFAULT;
-
-               return 0;
+               return put_user(tmp, (unsigned int __user *)arg);
 
        case RTC_VL_CLR:
                status = i2c_smbus_read_byte_data(client, ABX8XX_REG_STATUS);
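
The same RTC_VL_READ conversion repeats in the pcf2127, pcf85063, pcf8523, pcf8563, rv3028 and rv3029 hunks below: translate the chip-specific status bit into the generic RTC_VL_* flags and return it with put_user() instead of copy_to_user() on a local int. A generic sketch of the resulting shape, with hypothetical register and helper names:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/rtc.h>
#include <linux/uaccess.h>

#define EX_STATUS_BLF		BIT(3)	/* hypothetical battery-low flag */

/* Hypothetical register read; a real driver uses I2C/SPI/regmap here. */
static int ex_read_status(struct device *dev)
{
	return 0;
}

static int ex_rtc_ioctl(struct device *dev, unsigned int cmd,
			unsigned long arg)
{
	int status;

	switch (cmd) {
	case RTC_VL_READ:
		status = ex_read_status(dev);
		if (status < 0)
			return status;

		/* Map the chip-specific bit onto the generic flag. */
		status = (status & EX_STATUS_BLF) ? RTC_VL_BACKUP_LOW : 0;

		return put_user(status, (unsigned int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}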
index 10064bd..3ab81cd 100644 (file)
@@ -264,6 +264,9 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
                return PTR_ERR(priv->iobase);
 
        priv->clk = devm_clk_get(dev, "ahb");
+       if (IS_ERR(priv->clk))
+               return PTR_ERR(priv->clk);
+
        ret = clk_prepare_enable(priv->clk);
        if (ret) {
                dev_err(dev, "Failed to enable clk!\n");
index 3b833e0..5e811e0 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/bcd.h>
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
 
-#include "rtc-at91rm9200.h"
+#define        AT91_RTC_CR             0x00                    /* Control Register */
+#define                AT91_RTC_UPDTIM         BIT(0)          /* Update Request Time Register */
+#define                AT91_RTC_UPDCAL         BIT(1)          /* Update Request Calendar Register */
+
+#define        AT91_RTC_MR             0x04                    /* Mode Register */
+
+#define        AT91_RTC_TIMR           0x08                    /* Time Register */
+#define                AT91_RTC_SEC            GENMASK(6, 0)   /* Current Second */
+#define                AT91_RTC_MIN            GENMASK(14, 8)  /* Current Minute */
+#define                AT91_RTC_HOUR           GENMASK(21, 16) /* Current Hour */
+#define                AT91_RTC_AMPM           BIT(22)         /* Ante Meridiem Post Meridiem Indicator */
+
+#define        AT91_RTC_CALR           0x0c                    /* Calendar Register */
+#define                AT91_RTC_CENT           GENMASK(6, 0)   /* Current Century */
+#define                AT91_RTC_YEAR           GENMASK(15, 8)  /* Current Year */
+#define                AT91_RTC_MONTH          GENMASK(20, 16) /* Current Month */
+#define                AT91_RTC_DAY            GENMASK(23, 21) /* Current Day */
+#define                AT91_RTC_DATE           GENMASK(29, 24) /* Current Date */
+
+#define        AT91_RTC_TIMALR         0x10                    /* Time Alarm Register */
+#define                AT91_RTC_SECEN          BIT(7)          /* Second Alarm Enable */
+#define                AT91_RTC_MINEN          BIT(15)         /* Minute Alarm Enable */
+#define                AT91_RTC_HOUREN         BIT(23)         /* Hour Alarm Enable */
+
+#define        AT91_RTC_CALALR         0x14                    /* Calendar Alarm Register */
+#define                AT91_RTC_MTHEN          BIT(23)         /* Month Alarm Enable */
+#define                AT91_RTC_DATEEN         BIT(31)         /* Date Alarm Enable */
+
+#define        AT91_RTC_SR             0x18                    /* Status Register */
+#define                AT91_RTC_ACKUPD         BIT(0)          /* Acknowledge for Update */
+#define                AT91_RTC_ALARM          BIT(1)          /* Alarm Flag */
+#define                AT91_RTC_SECEV          BIT(2)          /* Second Event */
+#define                AT91_RTC_TIMEV          BIT(3)          /* Time Event */
+#define                AT91_RTC_CALEV          BIT(4)          /* Calendar Event */
+
+#define        AT91_RTC_SCCR           0x1c                    /* Status Clear Command Register */
+#define        AT91_RTC_IER            0x20                    /* Interrupt Enable Register */
+#define        AT91_RTC_IDR            0x24                    /* Interrupt Disable Register */
+#define        AT91_RTC_IMR            0x28                    /* Interrupt Mask Register */
+
+#define        AT91_RTC_VER            0x2c                    /* Valid Entry Register */
+#define                AT91_RTC_NVTIM          BIT(0)          /* Non valid Time */
+#define                AT91_RTC_NVCAL          BIT(1)          /* Non valid Calendar */
+#define                AT91_RTC_NVTIMALR       BIT(2)          /* Non valid Time Alarm */
+#define                AT91_RTC_NVCALALR       BIT(3)          /* Non valid Calendar Alarm */
 
 #define at91_rtc_read(field) \
        readl_relaxed(at91_rtc_regs + field)
@@ -117,20 +162,20 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
        } while ((time != at91_rtc_read(timereg)) ||
                        (date != at91_rtc_read(calreg)));
 
-       tm->tm_sec  = bcd2bin((time & AT91_RTC_SEC) >> 0);
-       tm->tm_min  = bcd2bin((time & AT91_RTC_MIN) >> 8);
-       tm->tm_hour = bcd2bin((time & AT91_RTC_HOUR) >> 16);
+       tm->tm_sec  = bcd2bin(FIELD_GET(AT91_RTC_SEC, time));
+       tm->tm_min  = bcd2bin(FIELD_GET(AT91_RTC_MIN, time));
+       tm->tm_hour = bcd2bin(FIELD_GET(AT91_RTC_HOUR, time));
 
        /*
         * The Calendar Alarm register does not have a field for
         * the year - so these will return an invalid value.
         */
        tm->tm_year  = bcd2bin(date & AT91_RTC_CENT) * 100;     /* century */
-       tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8);    /* year */
+       tm->tm_year += bcd2bin(FIELD_GET(AT91_RTC_YEAR, date)); /* year */
 
-       tm->tm_wday = bcd2bin((date & AT91_RTC_DAY) >> 21) - 1; /* day of the week [0-6], Sunday=0 */
-       tm->tm_mon  = bcd2bin((date & AT91_RTC_MONTH) >> 16) - 1;
-       tm->tm_mday = bcd2bin((date & AT91_RTC_DATE) >> 24);
+       tm->tm_wday = bcd2bin(FIELD_GET(AT91_RTC_DAY, date)) - 1;       /* day of the week [0-6], Sunday=0 */
+       tm->tm_mon  = bcd2bin(FIELD_GET(AT91_RTC_MONTH, date)) - 1;
+       tm->tm_mday = bcd2bin(FIELD_GET(AT91_RTC_DATE, date));
 }
 
 /*
@@ -167,16 +212,17 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
        at91_rtc_write_idr(AT91_RTC_ACKUPD);
 
        at91_rtc_write(AT91_RTC_TIMR,
-                         bin2bcd(tm->tm_sec) << 0
-                       | bin2bcd(tm->tm_min) << 8
-                       | bin2bcd(tm->tm_hour) << 16);
+                         FIELD_PREP(AT91_RTC_SEC, bin2bcd(tm->tm_sec))
+                       | FIELD_PREP(AT91_RTC_MIN, bin2bcd(tm->tm_min))
+                       | FIELD_PREP(AT91_RTC_HOUR, bin2bcd(tm->tm_hour)));
 
        at91_rtc_write(AT91_RTC_CALR,
-                         bin2bcd((tm->tm_year + 1900) / 100)   /* century */
-                       | bin2bcd(tm->tm_year % 100) << 8       /* year */
-                       | bin2bcd(tm->tm_mon + 1) << 16         /* tm_mon starts at zero */
-                       | bin2bcd(tm->tm_wday + 1) << 21        /* day of the week [0-6], Sunday=0 */
-                       | bin2bcd(tm->tm_mday) << 24);
+                         FIELD_PREP(AT91_RTC_CENT,
+                                    bin2bcd((tm->tm_year + 1900) / 100))
+                       | FIELD_PREP(AT91_RTC_YEAR, bin2bcd(tm->tm_year % 100))
+                       | FIELD_PREP(AT91_RTC_MONTH, bin2bcd(tm->tm_mon + 1))
+                       | FIELD_PREP(AT91_RTC_DAY, bin2bcd(tm->tm_wday + 1))
+                       | FIELD_PREP(AT91_RTC_DATE, bin2bcd(tm->tm_mday)));
 
        /* Restart Time/Calendar */
        cr = at91_rtc_read(AT91_RTC_CR);
@@ -211,25 +257,17 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
  */
 static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
-       struct rtc_time tm;
-
-       at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
-
-       tm.tm_mon = alrm->time.tm_mon;
-       tm.tm_mday = alrm->time.tm_mday;
-       tm.tm_hour = alrm->time.tm_hour;
-       tm.tm_min = alrm->time.tm_min;
-       tm.tm_sec = alrm->time.tm_sec;
+       struct rtc_time tm = alrm->time;
 
        at91_rtc_write_idr(AT91_RTC_ALARM);
        at91_rtc_write(AT91_RTC_TIMALR,
-                 bin2bcd(tm.tm_sec) << 0
-               | bin2bcd(tm.tm_min) << 8
-               | bin2bcd(tm.tm_hour) << 16
+                 FIELD_PREP(AT91_RTC_SEC, bin2bcd(alrm->time.tm_sec))
+               | FIELD_PREP(AT91_RTC_MIN, bin2bcd(alrm->time.tm_min))
+               | FIELD_PREP(AT91_RTC_HOUR, bin2bcd(alrm->time.tm_hour))
                | AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
        at91_rtc_write(AT91_RTC_CALALR,
-                 bin2bcd(tm.tm_mon + 1) << 16          /* tm_mon starts at zero */
-               | bin2bcd(tm.tm_mday) << 24
+                 FIELD_PREP(AT91_RTC_MONTH, bin2bcd(alrm->time.tm_mon + 1))
+               | FIELD_PREP(AT91_RTC_DATE, bin2bcd(alrm->time.tm_mday))
                | AT91_RTC_DATEEN | AT91_RTC_MTHEN);
 
        if (alrm->enabled) {
@@ -254,20 +292,6 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
        return 0;
 }
-/*
- * Provide additional RTC information in /proc/driver/rtc
- */
-static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
-{
-       unsigned long imr = at91_rtc_read_imr();
-
-       seq_printf(seq, "update_IRQ\t: %s\n",
-                       (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
-       seq_printf(seq, "periodic_IRQ\t: %s\n",
-                       (imr & AT91_RTC_SECEV) ? "yes" : "no");
-
-       return 0;
-}
 
 /*
  * IRQ handler for the RTC
@@ -326,6 +350,12 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
        }, {
                .compatible = "atmel,at91sam9x5-rtc",
                .data = &at91sam9x5_config,
+       }, {
+               .compatible = "atmel,sama5d4-rtc",
+               .data = &at91rm9200_config,
+       }, {
+               .compatible = "atmel,sama5d2-rtc",
+               .data = &at91rm9200_config,
        }, {
                /* sentinel */
        }
@@ -337,7 +367,6 @@ static const struct rtc_class_ops at91_rtc_ops = {
        .set_time       = at91_rtc_settime,
        .read_alarm     = at91_rtc_readalarm,
        .set_alarm      = at91_rtc_setalarm,
-       .proc           = at91_rtc_proc,
        .alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h
deleted file mode 100644 (file)
index 8be5289..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * arch/arm/mach-at91/include/mach/at91_rtc.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Real Time Clock (RTC) - System peripheral registers.
- * Based on AT91RM9200 datasheet revision E.
- */
-
-#ifndef AT91_RTC_H
-#define AT91_RTC_H
-
-#define        AT91_RTC_CR             0x00                    /* Control Register */
-#define                AT91_RTC_UPDTIM         (1 <<  0)               /* Update Request Time Register */
-#define                AT91_RTC_UPDCAL         (1 <<  1)               /* Update Request Calendar Register */
-#define                AT91_RTC_TIMEVSEL       (3 <<  8)               /* Time Event Selection */
-#define                        AT91_RTC_TIMEVSEL_MINUTE        (0 << 8)
-#define                        AT91_RTC_TIMEVSEL_HOUR          (1 << 8)
-#define                        AT91_RTC_TIMEVSEL_DAY24         (2 << 8)
-#define                        AT91_RTC_TIMEVSEL_DAY12         (3 << 8)
-#define                AT91_RTC_CALEVSEL       (3 << 16)               /* Calendar Event Selection */
-#define                        AT91_RTC_CALEVSEL_WEEK          (0 << 16)
-#define                        AT91_RTC_CALEVSEL_MONTH         (1 << 16)
-#define                        AT91_RTC_CALEVSEL_YEAR          (2 << 16)
-
-#define        AT91_RTC_MR             0x04                    /* Mode Register */
-#define                        AT91_RTC_HRMOD          (1 <<  0)               /* 12/24 Hour Mode */
-
-#define        AT91_RTC_TIMR           0x08                    /* Time Register */
-#define                AT91_RTC_SEC            (0x7f <<  0)            /* Current Second */
-#define                AT91_RTC_MIN            (0x7f <<  8)            /* Current Minute */
-#define                AT91_RTC_HOUR           (0x3f << 16)            /* Current Hour */
-#define                AT91_RTC_AMPM           (1    << 22)            /* Ante Meridiem Post Meridiem Indicator */
-
-#define        AT91_RTC_CALR           0x0c                    /* Calendar Register */
-#define                AT91_RTC_CENT           (0x7f <<  0)            /* Current Century */
-#define                AT91_RTC_YEAR           (0xff <<  8)            /* Current Year */
-#define                AT91_RTC_MONTH          (0x1f << 16)            /* Current Month */
-#define                AT91_RTC_DAY            (7    << 21)            /* Current Day */
-#define                AT91_RTC_DATE           (0x3f << 24)            /* Current Date */
-
-#define        AT91_RTC_TIMALR         0x10                    /* Time Alarm Register */
-#define                AT91_RTC_SECEN          (1 <<  7)               /* Second Alarm Enable */
-#define                AT91_RTC_MINEN          (1 << 15)               /* Minute Alarm Enable */
-#define                AT91_RTC_HOUREN         (1 << 23)               /* Hour Alarm Enable */
-
-#define        AT91_RTC_CALALR         0x14                    /* Calendar Alarm Register */
-#define                AT91_RTC_MTHEN          (1 << 23)               /* Month Alarm Enable */
-#define                AT91_RTC_DATEEN         (1 << 31)               /* Date Alarm Enable */
-
-#define        AT91_RTC_SR             0x18                    /* Status Register */
-#define                AT91_RTC_ACKUPD         (1 <<  0)               /* Acknowledge for Update */
-#define                AT91_RTC_ALARM          (1 <<  1)               /* Alarm Flag */
-#define                AT91_RTC_SECEV          (1 <<  2)               /* Second Event */
-#define                AT91_RTC_TIMEV          (1 <<  3)               /* Time Event */
-#define                AT91_RTC_CALEV          (1 <<  4)               /* Calendar Event */
-
-#define        AT91_RTC_SCCR           0x1c                    /* Status Clear Command Register */
-#define        AT91_RTC_IER            0x20                    /* Interrupt Enable Register */
-#define        AT91_RTC_IDR            0x24                    /* Interrupt Disable Register */
-#define        AT91_RTC_IMR            0x28                    /* Interrupt Mask Register */
-
-#define        AT91_RTC_VER            0x2c                    /* Valid Entry Register */
-#define                AT91_RTC_NVTIM          (1 <<  0)               /* Non valid Time */
-#define                AT91_RTC_NVCAL          (1 <<  1)               /* Non valid Calendar */
-#define                AT91_RTC_NVTIMALR       (1 <<  2)               /* Non valid Time Alarm */
-#define                AT91_RTC_NVCALALR       (1 <<  3)               /* Non valid Calendar Alarm */
-
-#endif
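
Together, the new defines in rtc-at91rm9200.c and the deleted header above amount to a conversion from open-coded shift-and-mask arithmetic to GENMASK()/BIT() with the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, which derive the shift from the mask at compile time. A minimal sketch of the equivalence for one hypothetical field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_MIN		GENMASK(14, 8)	/* minutes field, bits 14..8 */

static u32 ex_pack_minutes(u32 reg, unsigned int min)
{
	/* Equivalent to: (reg & ~0x7f00) | ((min << 8) & 0x7f00) */
	reg &= ~EX_MIN;
	return reg | FIELD_PREP(EX_MIN, min);
}

static unsigned int ex_unpack_minutes(u32 reg)
{
	/* Equivalent to: (reg & 0x7f00) >> 8 */
	return FIELD_GET(EX_MIN, reg);
}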
index 0333037..b795fe4 100644 (file)
@@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
                        rtc_cmos_int_handler = cmos_interrupt;
 
                retval = request_irq(rtc_irq, rtc_cmos_int_handler,
-                               IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
+                               0, dev_name(&cmos_rtc.rtc->dev),
                                cmos_rtc.rtc);
                if (retval < 0) {
                        dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
@@ -1197,8 +1197,6 @@ static void rtc_wake_off(struct device *dev)
 /* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
 static void use_acpi_alarm_quirks(void)
 {
-       int year;
-
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;
 
@@ -1208,8 +1206,10 @@ static void use_acpi_alarm_quirks(void)
        if (!is_hpet_enabled())
                return;
 
-       if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2015)
-               use_acpi_alarm = true;
+       if (dmi_get_bios_year() < 2015)
+               return;
+
+       use_acpi_alarm = true;
 }
 #else
 static inline void use_acpi_alarm_quirks(void) { }
@@ -1305,7 +1305,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
                 * hardcode it on systems with a legacy PIC.
                 */
                if (nr_legacy_irqs())
-                       irq = 8;
+                       irq = RTC_IRQ;
 #endif
                return cmos_do_probe(&pnp->dev,
                                pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
index d043d30..f7343c2 100644 (file)
@@ -5,7 +5,6 @@
 // Author: Stephen Barber <smbarber@chromium.org>
 
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
index d21004a..ba14342 100644 (file)
@@ -75,7 +75,6 @@ static const struct spi_device_id ds1343_id[] = {
 MODULE_DEVICE_TABLE(spi, ds1343_id);
 
 struct ds1343_priv {
-       struct spi_device *spi;
        struct rtc_device *rtc;
        struct regmap *map;
        int irq;
@@ -362,12 +361,13 @@ static int ds1343_probe(struct spi_device *spi)
        if (!priv)
                return -ENOMEM;
 
-       priv->spi = spi;
-
        /* RTC DS1347 works in spi mode 3 and
-        * its chip select is active high
+        * its chip select is active high. Active high should be defined as
+        * "inverse polarity" as GPIO-based chip selects can be logically
+        * active high but inverted by the GPIO library.
         */
-       spi->mode = SPI_MODE_3 | SPI_CS_HIGH;
+       spi->mode |= SPI_MODE_3;
+       spi->mode ^= SPI_CS_HIGH;
        spi->bits_per_word = 8;
        res = spi_setup(spi);
        if (res)
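
A condensed sketch of the chip-select handling assumed by the comment above: since the SPI core may already have inverted SPI_CS_HIGH for a GPIO-based chip select, the driver toggles the flag rather than forcing it. Names are hypothetical; only the spi_device fields shown in the hunk are used.

#include <linux/spi/spi.h>

/* Toggle SPI_CS_HIGH relative to whatever polarity the core already chose,
 * instead of overwriting spi->mode wholesale. */
static int ex_setup_active_high_cs(struct spi_device *spi)
{
	spi->mode |= SPI_MODE_3;	/* CPOL = 1, CPHA = 1 */
	spi->mode ^= SPI_CS_HIGH;	/* flip, do not force, the CS polarity */
	spi->bits_per_word = 8;

	return spi_setup(spi);
}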
index 443f6d0..0fb79c4 100644 (file)
@@ -78,7 +78,6 @@
 struct hym8563 {
        struct i2c_client       *client;
        struct rtc_device       *rtc;
-       bool                    valid;
 #ifdef CONFIG_COMMON_CLK
        struct clk_hw           clkout_hw;
 #endif
@@ -91,19 +90,19 @@ struct hym8563 {
 static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct hym8563 *hym8563 = i2c_get_clientdata(client);
        u8 buf[7];
        int ret;
 
-       if (!hym8563->valid) {
-               dev_warn(&client->dev, "no valid clock/calendar values available\n");
-               return -EPERM;
-       }
-
        ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
        if (ret < 0)
                return ret;
 
+       if (buf[0] & HYM8563_SEC_VL) {
+               dev_warn(&client->dev,
+                        "no valid clock/calendar values available\n");
+               return -EINVAL;
+       }
+
        tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK);
        tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK);
        tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK);
@@ -118,7 +117,6 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
 static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct hym8563 *hym8563 = i2c_get_clientdata(client);
        u8 buf[7];
        int ret;
 
@@ -157,8 +155,6 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
        if (ret < 0)
                return ret;
 
-       hym8563->valid = true;
-
        return 0;
 }
 
@@ -556,9 +552,8 @@ static int hym8563_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
 
-       hym8563->valid = !(ret & HYM8563_SEC_VL);
        dev_dbg(&client->dev, "rtc information is %s\n",
-               hym8563->valid ? "valid" : "invalid");
+               (ret & HYM8563_SEC_VL) ? "invalid" : "valid");
 
        hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
                                                &hym8563_rtc_ops, THIS_MODULE);
index 07b30a3..6b24ac9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * MOXA ART RTC driver.
  *
@@ -7,10 +8,6 @@
  *
  * Based on code from
  * Moxa Technology Co., Ltd. <www.moxa.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/init.h>
index 9135e21..cda238d 100644 (file)
@@ -297,15 +297,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
 
        rtc->rtc_dev->ops = &mtk_rtc_ops;
 
-       ret = rtc_register_device(rtc->rtc_dev);
-       if (ret)
-               goto out_free_irq;
-
-       return 0;
-
-out_free_irq:
-       free_irq(rtc->irq, rtc);
-       return ret;
+       return rtc_register_device(rtc->rtc_dev);
 }
 
 #ifdef CONFIG_PM_SLEEP
index 988a4df..d4ed20f 100644 (file)
@@ -616,7 +616,7 @@ static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
                break;
        default:
                return -ENOTSUPP;
-       };
+       }
 
        *config = pinconf_to_config_packed(param, arg);
 
index ba5baac..4e50d67 100644 (file)
@@ -199,11 +199,9 @@ static int pcf2127_rtc_ioctl(struct device *dev,
                if (ret)
                        return ret;
 
-               touser = touser & PCF2127_BIT_CTRL3_BLF ? 1 : 0;
+               touser = touser & PCF2127_BIT_CTRL3_BLF ? RTC_VL_BACKUP_LOW : 0;
 
-               if (copy_to_user((void __user *)arg, &touser, sizeof(int)))
-                       return -EFAULT;
-               return 0;
+               return put_user(touser, (unsigned int __user *)arg);
        default:
                return -ENOIOCTLCMD;
        }
index 1afa6d9..1db17ba 100644 (file)
@@ -289,21 +289,9 @@ static int pcf85063_ioctl(struct device *dev, unsigned int cmd,
                if (ret < 0)
                        return ret;
 
-               if (status & PCF85063_REG_SC_OS)
-                       dev_warn(&pcf85063->rtc->dev, "Voltage low, data loss detected.\n");
+               status = status & PCF85063_REG_SC_OS ? RTC_VL_DATA_INVALID : 0;
 
-               status &= PCF85063_REG_SC_OS;
-
-               if (copy_to_user((void __user *)arg, &status, sizeof(int)))
-                       return -EFAULT;
-
-               return 0;
-
-       case RTC_VL_CLR:
-               ret = regmap_update_bits(pcf85063->regmap, PCF85063_REG_SC,
-                                        PCF85063_REG_SC_OS, 0);
-
-               return ret;
+               return put_user(status, (unsigned int __user *)arg);
 
        default:
                return -ENOIOCTLCMD;
index b24c908..47e0f41 100644 (file)
@@ -282,11 +282,11 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
                ret = pcf8523_voltage_low(client);
                if (ret < 0)
                        return ret;
+               if (ret)
+                       ret = RTC_VL_BACKUP_LOW;
 
-               if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
-                       return -EFAULT;
+               return put_user(ret, (unsigned int __user *)arg);
 
-               return 0;
        default:
                return -ENOIOCTLCMD;
        }
index 3c322f3..2dc30ea 100644 (file)
@@ -22,8 +22,8 @@
 
 #define PCF8563_REG_ST1                0x00 /* status */
 #define PCF8563_REG_ST2                0x01
-#define PCF8563_BIT_AIE                (1 << 1)
-#define PCF8563_BIT_AF         (1 << 3)
+#define PCF8563_BIT_AIE                BIT(1)
+#define PCF8563_BIT_AF         BIT(3)
 #define PCF8563_BITS_ST2_N     (7 << 5)
 
 #define PCF8563_REG_SC         0x02 /* datetime */
@@ -76,7 +76,6 @@ struct pcf8563 {
         * 1970...2069.
         */
        int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
-       int voltage_low; /* incicates if a low_voltage was detected */
 
        struct i2c_client *client;
 #ifdef CONFIG_COMMON_CLK
@@ -208,7 +207,6 @@ static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
                return err;
 
        if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
-               pcf8563->voltage_low = 1;
                dev_err(&client->dev,
                        "low voltage detected, date/time is not reliable.\n");
                return -EINVAL;
@@ -276,43 +274,23 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
                                9 - PCF8563_REG_SC, buf + PCF8563_REG_SC);
 }
 
-#ifdef CONFIG_RTC_INTF_DEV
 static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
-       struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
-       struct rtc_time tm;
+       struct i2c_client *client = to_i2c_client(dev);
+       int ret;
 
        switch (cmd) {
        case RTC_VL_READ:
-               if (pcf8563->voltage_low)
-                       dev_info(dev, "low voltage detected, date/time is not reliable.\n");
-
-               if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
-                                       sizeof(int)))
-                       return -EFAULT;
-               return 0;
-       case RTC_VL_CLR:
-               /*
-                * Clear the VL bit in the seconds register in case
-                * the time has not been set already (which would
-                * have cleared it). This does not really matter
-                * because of the cached voltage_low value but do it
-                * anyway for consistency.
-                */
-               if (pcf8563_rtc_read_time(dev, &tm))
-                       pcf8563_rtc_set_time(dev, &tm);
-
-               /* Clear the cached value. */
-               pcf8563->voltage_low = 0;
+               ret = i2c_smbus_read_byte_data(client, PCF8563_REG_SC);
+               if (ret < 0)
+                       return ret;
 
-               return 0;
+               return put_user(ret & PCF8563_SC_LV ? RTC_VL_DATA_INVALID : 0,
+                               (unsigned int __user *)arg);
        default:
                return -ENOIOCTLCMD;
        }
 }
-#else
-#define pcf8563_rtc_ioctl NULL
-#endif
 
 static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
 {
index 6b7b3a6..a0ddc86 100644 (file)
@@ -428,21 +428,8 @@ static int rv3028_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
                if (ret < 0)
                        return ret;
 
-               if (status & RV3028_STATUS_PORF)
-                       dev_warn(&rv3028->rtc->dev, "Voltage low, data loss detected.\n");
-
-               status &= RV3028_STATUS_PORF;
-
-               if (copy_to_user((void __user *)arg, &status, sizeof(int)))
-                       return -EFAULT;
-
-               return 0;
-
-       case RTC_VL_CLR:
-               ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS,
-                                        RV3028_STATUS_PORF, 0);
-
-               return ret;
+               status = status & RV3028_STATUS_PORF ? RTC_VL_DATA_INVALID : 0;
+               return put_user(status, (unsigned int __user *)arg);
 
        default:
                return -ENOIOCTLCMD;
index 4cdf658..6271823 100644 (file)
 #define RV3029_CONTROL_E2P_TOV_MASK    0x3F /* XTAL turnover temp mask */
 
 /* user ram section */
-#define RV3029_USR1_RAM_PAGE           0x38
-#define RV3029_USR1_SECTION_LEN                0x04
-#define RV3029_USR2_RAM_PAGE           0x3C
-#define RV3029_USR2_SECTION_LEN                0x04
+#define RV3029_RAM_PAGE                        0x38
+#define RV3029_RAM_SECTION_LEN         8
 
 struct rv3029_data {
        struct device           *dev;
@@ -121,77 +119,13 @@ struct rv3029_data {
        int irq;
 };
 
-static int rv3029_read_regs(struct device *dev, u8 reg, u8 *buf,
-                           unsigned int len)
-{
-       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
-
-       if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
-           (reg + len > RV3029_USR1_RAM_PAGE + 8))
-               return -EINVAL;
-
-       return regmap_bulk_read(rv3029->regmap, reg, buf, len);
-}
-
-static int rv3029_write_regs(struct device *dev, u8 reg, u8 const buf[],
-                            unsigned int len)
-{
-       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
-
-       if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
-           (reg + len > RV3029_USR1_RAM_PAGE + 8))
-               return -EINVAL;
-
-       return regmap_bulk_write(rv3029->regmap, reg, buf, len);
-}
-
-static int rv3029_update_bits(struct device *dev, u8 reg, u8 mask, u8 set)
-{
-       u8 buf;
-       int ret;
-
-       ret = rv3029_read_regs(dev, reg, &buf, 1);
-       if (ret < 0)
-               return ret;
-       buf &= ~mask;
-       buf |= set & mask;
-       ret = rv3029_write_regs(dev, reg, &buf, 1);
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static int rv3029_get_sr(struct device *dev, u8 *buf)
-{
-       int ret = rv3029_read_regs(dev, RV3029_STATUS, buf, 1);
-
-       if (ret < 0)
-               return -EIO;
-       dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
-       return 0;
-}
-
-static int rv3029_set_sr(struct device *dev, u8 val)
-{
-       u8 buf[1];
-       int sr;
-
-       buf[0] = val;
-       sr = rv3029_write_regs(dev, RV3029_STATUS, buf, 1);
-       dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
-       if (sr < 0)
-               return -EIO;
-       return 0;
-}
-
-static int rv3029_eeprom_busywait(struct device *dev)
+static int rv3029_eeprom_busywait(struct rv3029_data *rv3029)
 {
+       unsigned int sr;
        int i, ret;
-       u8 sr;
 
        for (i = 100; i > 0; i--) {
-               ret = rv3029_get_sr(dev, &sr);
+               ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
                if (ret < 0)
                        break;
                if (!(sr & RV3029_STATUS_EEBUSY))
@@ -199,126 +133,128 @@ static int rv3029_eeprom_busywait(struct device *dev)
                usleep_range(1000, 10000);
        }
        if (i <= 0) {
-               dev_err(dev, "EEPROM busy wait timeout.\n");
+               dev_err(rv3029->dev, "EEPROM busy wait timeout.\n");
                return -ETIMEDOUT;
        }
 
        return ret;
 }
 
-static int rv3029_eeprom_exit(struct device *dev)
+static int rv3029_eeprom_exit(struct rv3029_data *rv3029)
 {
        /* Re-enable eeprom refresh */
-       return rv3029_update_bits(dev, RV3029_ONOFF_CTRL,
+       return regmap_update_bits(rv3029->regmap, RV3029_ONOFF_CTRL,
                                  RV3029_ONOFF_CTRL_EERE,
                                  RV3029_ONOFF_CTRL_EERE);
 }
 
-static int rv3029_eeprom_enter(struct device *dev)
+static int rv3029_eeprom_enter(struct rv3029_data *rv3029)
 {
+       unsigned int sr;
        int ret;
-       u8 sr;
 
        /* Check whether we are in the allowed voltage range. */
-       ret = rv3029_get_sr(dev, &sr);
+       ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
        if (ret < 0)
                return ret;
-       if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
+       if (sr & RV3029_STATUS_VLOW2)
+               return -ENODEV;
+       if (sr & RV3029_STATUS_VLOW1) {
                /* We clear the bits and retry once just in case
                 * we had a brown out in early startup.
                 */
-               sr &= ~RV3029_STATUS_VLOW1;
-               sr &= ~RV3029_STATUS_VLOW2;
-               ret = rv3029_set_sr(dev, sr);
+               ret = regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+                                        RV3029_STATUS_VLOW1, 0);
                if (ret < 0)
                        return ret;
                usleep_range(1000, 10000);
-               ret = rv3029_get_sr(dev, &sr);
+               ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
                if (ret < 0)
                        return ret;
-               if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
-                       dev_err(dev,
+               if (sr & RV3029_STATUS_VLOW1) {
+                       dev_err(rv3029->dev,
                                "Supply voltage is too low to safely access the EEPROM.\n");
                        return -ENODEV;
                }
        }
 
        /* Disable eeprom refresh. */
-       ret = rv3029_update_bits(dev, RV3029_ONOFF_CTRL, RV3029_ONOFF_CTRL_EERE,
-                                0);
+       ret = regmap_update_bits(rv3029->regmap, RV3029_ONOFF_CTRL,
+                                RV3029_ONOFF_CTRL_EERE, 0);
        if (ret < 0)
                return ret;
 
        /* Wait for any previous eeprom accesses to finish. */
-       ret = rv3029_eeprom_busywait(dev);
+       ret = rv3029_eeprom_busywait(rv3029);
        if (ret < 0)
-               rv3029_eeprom_exit(dev);
+               rv3029_eeprom_exit(rv3029);
 
        return ret;
 }
 
-static int rv3029_eeprom_read(struct device *dev, u8 reg,
+static int rv3029_eeprom_read(struct rv3029_data *rv3029, u8 reg,
                              u8 buf[], size_t len)
 {
        int ret, err;
 
-       err = rv3029_eeprom_enter(dev);
+       err = rv3029_eeprom_enter(rv3029);
        if (err < 0)
                return err;
 
-       ret = rv3029_read_regs(dev, reg, buf, len);
+       ret = regmap_bulk_read(rv3029->regmap, reg, buf, len);
 
-       err = rv3029_eeprom_exit(dev);
+       err = rv3029_eeprom_exit(rv3029);
        if (err < 0)
                return err;
 
        return ret;
 }
 
-static int rv3029_eeprom_write(struct device *dev, u8 reg,
+static int rv3029_eeprom_write(struct rv3029_data *rv3029, u8 reg,
                               u8 const buf[], size_t len)
 {
+       unsigned int tmp;
        int ret, err;
        size_t i;
-       u8 tmp;
 
-       err = rv3029_eeprom_enter(dev);
+       err = rv3029_eeprom_enter(rv3029);
        if (err < 0)
                return err;
 
        for (i = 0; i < len; i++, reg++) {
-               ret = rv3029_read_regs(dev, reg, &tmp, 1);
+               ret = regmap_read(rv3029->regmap, reg, &tmp);
                if (ret < 0)
                        break;
                if (tmp != buf[i]) {
-                       ret = rv3029_write_regs(dev, reg, &buf[i], 1);
+                       tmp = buf[i];
+                       ret = regmap_write(rv3029->regmap, reg, tmp);
                        if (ret < 0)
                                break;
                }
-               ret = rv3029_eeprom_busywait(dev);
+               ret = rv3029_eeprom_busywait(rv3029);
                if (ret < 0)
                        break;
        }
 
-       err = rv3029_eeprom_exit(dev);
+       err = rv3029_eeprom_exit(rv3029);
        if (err < 0)
                return err;
 
        return ret;
 }
 
-static int rv3029_eeprom_update_bits(struct device *dev,
+static int rv3029_eeprom_update_bits(struct rv3029_data *rv3029,
                                     u8 reg, u8 mask, u8 set)
 {
        u8 buf;
        int ret;
 
-       ret = rv3029_eeprom_read(dev, reg, &buf, 1);
+       ret = rv3029_eeprom_read(rv3029, reg, &buf, 1);
        if (ret < 0)
                return ret;
        buf &= ~mask;
        buf |= set & mask;
-       ret = rv3029_eeprom_write(dev, reg, &buf, 1);
+       ret = rv3029_eeprom_write(rv3029, reg, &buf, 1);
        if (ret < 0)
                return ret;
 
@@ -330,20 +266,20 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
        struct device *dev = dev_id;
        struct rv3029_data *rv3029 = dev_get_drvdata(dev);
        struct mutex *lock = &rv3029->rtc->ops_lock;
+       unsigned int flags, controls;
        unsigned long events = 0;
-       u8 flags, controls;
        int ret;
 
        mutex_lock(lock);
 
-       ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+       ret = regmap_read(rv3029->regmap, RV3029_IRQ_CTRL, &controls);
        if (ret) {
                dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
                mutex_unlock(lock);
                return IRQ_NONE;
        }
 
-       ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
+       ret = regmap_read(rv3029->regmap, RV3029_IRQ_FLAGS, &flags);
        if (ret) {
                dev_warn(dev, "Read IRQ Flags Register error %d\n", ret);
                mutex_unlock(lock);
@@ -358,8 +294,8 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
 
        if (events) {
                rtc_update_irq(rv3029->rtc, 1, events);
-               rv3029_write_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
-               rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+               regmap_write(rv3029->regmap, RV3029_IRQ_FLAGS, flags);
+               regmap_write(rv3029->regmap, RV3029_IRQ_CTRL, controls);
        }
        mutex_unlock(lock);
 
@@ -368,22 +304,22 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
 
 static int rv3029_read_time(struct device *dev, struct rtc_time *tm)
 {
-       u8 buf[1];
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+       unsigned int sr;
        int ret;
        u8 regs[RV3029_WATCH_SECTION_LEN] = { 0, };
 
-       ret = rv3029_get_sr(dev, buf);
-       if (ret < 0) {
-               dev_err(dev, "%s: reading SR failed\n", __func__);
-               return -EIO;
-       }
+       ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
+       if (ret < 0)
+               return ret;
+
+       if (sr & (RV3029_STATUS_VLOW2 | RV3029_STATUS_PON))
+               return -EINVAL;
 
-       ret = rv3029_read_regs(dev, RV3029_W_SEC, regs,
+       ret = regmap_bulk_read(rv3029->regmap, RV3029_W_SEC, regs,
                               RV3029_WATCH_SECTION_LEN);
-       if (ret < 0) {
-               dev_err(dev, "%s: reading RTC section failed\n", __func__);
+       if (ret < 0)
                return ret;
-       }
 
        tm->tm_sec = bcd2bin(regs[RV3029_W_SEC - RV3029_W_SEC]);
        tm->tm_min = bcd2bin(regs[RV3029_W_MINUTES - RV3029_W_SEC]);
@@ -411,34 +347,24 @@ static int rv3029_read_time(struct device *dev, struct rtc_time *tm)
 
 static int rv3029_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
        struct rtc_time *const tm = &alarm->time;
+       unsigned int controls, flags;
        int ret;
-       u8 regs[8], controls, flags;
-
-       ret = rv3029_get_sr(dev, regs);
-       if (ret < 0) {
-               dev_err(dev, "%s: reading SR failed\n", __func__);
-               return -EIO;
-       }
+       u8 regs[8];
 
-       ret = rv3029_read_regs(dev, RV3029_A_SC, regs,
+       ret = regmap_bulk_read(rv3029->regmap, RV3029_A_SC, regs,
                               RV3029_ALARM_SECTION_LEN);
-
-       if (ret < 0) {
-               dev_err(dev, "%s: reading alarm section failed\n", __func__);
+       if (ret < 0)
                return ret;
-       }
 
-       ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
-       if (ret) {
-               dev_err(dev, "Read IRQ Control Register error %d\n", ret);
+       ret = regmap_read(rv3029->regmap, RV3029_IRQ_CTRL, &controls);
+       if (ret)
                return ret;
-       }
-       ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
-       if (ret < 0) {
-               dev_err(dev, "Read IRQ Flags Register error %d\n", ret);
+
+       ret = regmap_read(rv3029->regmap, RV3029_IRQ_FLAGS, &flags);
+       if (ret < 0)
                return ret;
-       }
 
        tm->tm_sec = bcd2bin(regs[RV3029_A_SC - RV3029_A_SC] & 0x7f);
        tm->tm_min = bcd2bin(regs[RV3029_A_MN - RV3029_A_SC] & 0x7f);
@@ -456,50 +382,20 @@ static int rv3029_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 
 static int rv3029_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
-       int ret;
-       u8 controls;
-
-       ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
-       if (ret < 0) {
-               dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
-               return ret;
-       }
-
-       /* enable/disable AIE irq */
-       if (enable)
-               controls |= RV3029_IRQ_CTRL_AIE;
-       else
-               controls &= ~RV3029_IRQ_CTRL_AIE;
-
-       ret = rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
-       if (ret < 0) {
-               dev_err(dev, "can't update INT reg\n");
-               return ret;
-       }
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 
-       return 0;
+       return regmap_update_bits(rv3029->regmap, RV3029_IRQ_CTRL,
+                                 RV3029_IRQ_CTRL_AIE,
+                                 enable ? RV3029_IRQ_CTRL_AIE : 0);
 }
 
 static int rv3029_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
        struct rtc_time *const tm = &alarm->time;
        int ret;
        u8 regs[8];
 
-       /*
-        * The clock has an 8 bit wide bcd-coded register (they never learn)
-        * for the year. tm_year is an offset from 1900 and we are interested
-        * in the 2000-2099 range, so any value less than 100 is invalid.
-       */
-       if (tm->tm_year < 100)
-               return -EINVAL;
-
-       ret = rv3029_get_sr(dev, regs);
-       if (ret < 0) {
-               dev_err(dev, "%s: reading SR failed\n", __func__);
-               return -EIO;
-       }
-
        /* Activate all the alarms with AE_x bit */
        regs[RV3029_A_SC - RV3029_A_SC] = bin2bcd(tm->tm_sec) | RV3029_A_AE_X;
        regs[RV3029_A_MN - RV3029_A_SC] = bin2bcd(tm->tm_min) | RV3029_A_AE_X;
@@ -515,39 +411,20 @@ static int rv3029_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
                | RV3029_A_AE_X;
 
        /* Write the alarm */
-       ret = rv3029_write_regs(dev, RV3029_A_SC, regs,
+       ret = regmap_bulk_write(rv3029->regmap, RV3029_A_SC, regs,
                                RV3029_ALARM_SECTION_LEN);
        if (ret < 0)
                return ret;
 
-       if (alarm->enabled) {
-               /* enable AIE irq */
-               ret = rv3029_alarm_irq_enable(dev, 1);
-               if (ret)
-                       return ret;
-       } else {
-               /* disable AIE irq */
-               ret = rv3029_alarm_irq_enable(dev, 0);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
+       return rv3029_alarm_irq_enable(dev, alarm->enabled);
 }
 
 static int rv3029_set_time(struct device *dev, struct rtc_time *tm)
 {
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
        u8 regs[8];
        int ret;
 
-       /*
-        * The clock has an 8 bit wide bcd-coded register (they never learn)
-        * for the year. tm_year is an offset from 1900 and we are interested
-        * in the 2000-2099 range, so any value less than 100 is invalid.
-       */
-       if (tm->tm_year < 100)
-               return -EINVAL;
-
        regs[RV3029_W_SEC - RV3029_W_SEC] = bin2bcd(tm->tm_sec);
        regs[RV3029_W_MINUTES - RV3029_W_SEC] = bin2bcd(tm->tm_min);
        regs[RV3029_W_HOURS - RV3029_W_SEC] = bin2bcd(tm->tm_hour);
@@ -556,24 +433,55 @@ static int rv3029_set_time(struct device *dev, struct rtc_time *tm)
        regs[RV3029_W_DAYS - RV3029_W_SEC] = bin2bcd(tm->tm_wday + 1) & 0x7;
        regs[RV3029_W_YEARS - RV3029_W_SEC] = bin2bcd(tm->tm_year - 100);
 
-       ret = rv3029_write_regs(dev, RV3029_W_SEC, regs,
+       ret = regmap_bulk_write(rv3029->regmap, RV3029_W_SEC, regs,
                                RV3029_WATCH_SECTION_LEN);
        if (ret < 0)
                return ret;
 
-       ret = rv3029_get_sr(dev, regs);
-       if (ret < 0) {
-               dev_err(dev, "%s: reading SR failed\n", __func__);
-               return ret;
-       }
-       /* clear PON bit */
-       ret = rv3029_set_sr(dev, (regs[0] & ~RV3029_STATUS_PON));
-       if (ret < 0) {
-               dev_err(dev, "%s: reading SR failed\n", __func__);
-               return ret;
+       /* clear PON and VLOW2 bits */
+       return regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+                                 RV3029_STATUS_PON | RV3029_STATUS_VLOW2, 0);
+}
+
+static int rv3029_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+       unsigned long vl = 0;
+       int sr, ret = 0;
+
+       switch (cmd) {
+       case RTC_VL_READ:
+               ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
+               if (ret < 0)
+                       return ret;
+
+               if (sr & RV3029_STATUS_VLOW1)
+                       vl = RTC_VL_ACCURACY_LOW;
+
+               if (sr & (RV3029_STATUS_VLOW2 | RV3029_STATUS_PON))
+                       vl |= RTC_VL_DATA_INVALID;
+
+               return put_user(vl, (unsigned int __user *)arg);
+
+       case RTC_VL_CLR:
+               return regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+                                         RV3029_STATUS_VLOW1, 0);
+
+       default:
+               return -ENOIOCTLCMD;
        }
+}
 
-       return 0;
+static int rv3029_nvram_write(void *priv, unsigned int offset, void *val,
+                             size_t bytes)
+{
+       return regmap_bulk_write(priv, RV3029_RAM_PAGE + offset, val, bytes);
+}
+
+static int rv3029_nvram_read(void *priv, unsigned int offset, void *val,
+                            size_t bytes)
+{
+       return regmap_bulk_read(priv, RV3029_RAM_PAGE + offset, val, bytes);
 }
 
 static const struct rv3029_trickle_tab_elem {
@@ -635,6 +543,7 @@ static const struct rv3029_trickle_tab_elem {
 
 static void rv3029_trickle_config(struct device *dev)
 {
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
        struct device_node *of_node = dev->of_node;
        const struct rv3029_trickle_tab_elem *elem;
        int i, err;
@@ -661,7 +570,7 @@ static void rv3029_trickle_config(struct device *dev)
                         "Trickle charger enabled at %d ohms resistance.\n",
                         elem->r);
        }
-       err = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
+       err = rv3029_eeprom_update_bits(rv3029, RV3029_CONTROL_E2P_EECTRL,
                                        RV3029_TRICKLE_MASK,
                                        trickle_set_bits);
        if (err < 0)
@@ -670,12 +579,12 @@ static void rv3029_trickle_config(struct device *dev)
 
 #ifdef CONFIG_RTC_DRV_RV3029_HWMON
 
-static int rv3029_read_temp(struct device *dev, int *temp_mC)
+static int rv3029_read_temp(struct rv3029_data *rv3029, int *temp_mC)
 {
+       unsigned int temp;
        int ret;
-       u8 temp;
 
-       ret = rv3029_read_regs(dev, RV3029_TEMP_PAGE, &temp, 1);
+       ret = regmap_read(rv3029->regmap, RV3029_TEMP_PAGE, &temp);
        if (ret < 0)
                return ret;
 
@@ -688,9 +597,10 @@ static ssize_t rv3029_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
 {
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
        int ret, temp_mC;
 
-       ret = rv3029_read_temp(dev, &temp_mC);
+       ret = rv3029_read_temp(rv3029, &temp_mC);
        if (ret < 0)
                return ret;
 
@@ -702,9 +612,10 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
                                                const char *buf,
                                                size_t count)
 {
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+       unsigned int th_set_bits = 0;
        unsigned long interval_ms;
        int ret;
-       u8 th_set_bits = 0;
 
        ret = kstrtoul(buf, 10, &interval_ms);
        if (ret < 0)
@@ -715,7 +626,7 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
                if (interval_ms >= 16000)
                        th_set_bits |= RV3029_EECTRL_THP;
        }
-       ret = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
+       ret = rv3029_eeprom_update_bits(rv3029, RV3029_CONTROL_E2P_EECTRL,
                                        RV3029_EECTRL_THE | RV3029_EECTRL_THP,
                                        th_set_bits);
        if (ret < 0)
@@ -728,10 +639,11 @@ static ssize_t rv3029_hwmon_show_update_interval(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
 {
+       struct rv3029_data *rv3029 = dev_get_drvdata(dev);
        int ret, interval_ms;
        u8 eectrl;
 
-       ret = rv3029_eeprom_read(dev, RV3029_CONTROL_E2P_EECTRL,
+       ret = rv3029_eeprom_read(rv3029, RV3029_CONTROL_E2P_EECTRL,
                                 &eectrl, 1);
        if (ret < 0)
                return ret;
@@ -785,14 +697,23 @@ static void rv3029_hwmon_register(struct device *dev, const char *name)
 static struct rtc_class_ops rv3029_rtc_ops = {
        .read_time      = rv3029_read_time,
        .set_time       = rv3029_set_time,
+       .ioctl          = rv3029_ioctl,
 };
 
 static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
                        const char *name)
 {
        struct rv3029_data *rv3029;
+       struct nvmem_config nvmem_cfg = {
+               .name = "rv3029_nvram",
+               .word_size = 1,
+               .stride = 1,
+               .size = RV3029_RAM_SECTION_LEN,
+               .type = NVMEM_TYPE_BATTERY_BACKED,
+               .reg_read = rv3029_nvram_read,
+               .reg_write = rv3029_nvram_write,
+       };
        int rc = 0;
-       u8 buf[1];
 
        rv3029 = devm_kzalloc(dev, sizeof(*rv3029), GFP_KERNEL);
        if (!rv3029)
@@ -803,21 +724,12 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
        rv3029->dev = dev;
        dev_set_drvdata(dev, rv3029);
 
-       rc = rv3029_get_sr(dev, buf);
-       if (rc < 0) {
-               dev_err(dev, "reading status failed\n");
-               return rc;
-       }
-
        rv3029_trickle_config(dev);
        rv3029_hwmon_register(dev, name);
 
-       rv3029->rtc = devm_rtc_device_register(dev, name, &rv3029_rtc_ops,
-                                              THIS_MODULE);
-       if (IS_ERR(rv3029->rtc)) {
-               dev_err(dev, "unable to register the class device\n");
+       rv3029->rtc = devm_rtc_allocate_device(dev);
+       if (IS_ERR(rv3029->rtc))
                return PTR_ERR(rv3029->rtc);
-       }
 
        if (rv3029->irq > 0) {
                rc = devm_request_threaded_irq(dev, rv3029->irq,
@@ -834,20 +746,48 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
                }
        }
 
+       rv3029->rtc->ops = &rv3029_rtc_ops;
+       rv3029->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+       rv3029->rtc->range_max = RTC_TIMESTAMP_END_2079;
+
+       rc = rtc_register_device(rv3029->rtc);
+       if (rc)
+               return rc;
+
+       nvmem_cfg.priv = rv3029->regmap;
+       rtc_nvmem_register(rv3029->rtc, &nvmem_cfg);
+
        return 0;
 }
 
+static const struct regmap_range rv3029_holes_range[] = {
+       regmap_reg_range(0x05, 0x07),
+       regmap_reg_range(0x0f, 0x0f),
+       regmap_reg_range(0x17, 0x17),
+       regmap_reg_range(0x1a, 0x1f),
+       regmap_reg_range(0x21, 0x27),
+       regmap_reg_range(0x34, 0x37),
+};
+
+static const struct regmap_access_table rv3029_regs = {
+       .no_ranges =    rv3029_holes_range,
+       .n_no_ranges =  ARRAY_SIZE(rv3029_holes_range),
+};
+
+static const struct regmap_config config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .rd_table = &rv3029_regs,
+       .wr_table = &rv3029_regs,
+       .max_register = 0x3f,
+};
+
 #if IS_ENABLED(CONFIG_I2C)
 
 static int rv3029_i2c_probe(struct i2c_client *client,
                            const struct i2c_device_id *id)
 {
        struct regmap *regmap;
-       static const struct regmap_config config = {
-               .reg_bits = 8,
-               .val_bits = 8,
-       };
-
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
                                     I2C_FUNC_SMBUS_BYTE)) {
                dev_err(&client->dev, "Adapter does not support SMBUS_I2C_BLOCK or SMBUS_I2C_BYTE\n");
@@ -855,11 +795,8 @@ static int rv3029_i2c_probe(struct i2c_client *client,
        }
 
        regmap = devm_regmap_init_i2c(client, &config);
-       if (IS_ERR(regmap)) {
-               dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
-                       __func__, PTR_ERR(regmap));
+       if (IS_ERR(regmap))
                return PTR_ERR(regmap);
-       }
 
        return rv3029_probe(&client->dev, regmap, client->irq, client->name);
 }
@@ -873,24 +810,20 @@ MODULE_DEVICE_TABLE(i2c, rv3029_id);
 
 static const struct of_device_id rv3029_of_match[] = {
        { .compatible = "microcrystal,rv3029" },
-       /* Backward compatibility only, do not use compatibles below: */
-       { .compatible = "rv3029" },
-       { .compatible = "rv3029c2" },
-       { .compatible = "mc,rv3029c2" },
        { }
 };
 MODULE_DEVICE_TABLE(of, rv3029_of_match);
 
 static struct i2c_driver rv3029_driver = {
        .driver = {
-               .name = "rtc-rv3029c2",
+               .name = "rv3029",
                .of_match_table = of_match_ptr(rv3029_of_match),
        },
        .probe          = rv3029_i2c_probe,
        .id_table       = rv3029_id,
 };
 
-static int rv3029_register_driver(void)
+static int __init rv3029_register_driver(void)
 {
        return i2c_add_driver(&rv3029_driver);
 }
@@ -902,7 +835,7 @@ static void rv3029_unregister_driver(void)
 
 #else
 
-static int rv3029_register_driver(void)
+static int __init rv3029_register_driver(void)
 {
        return 0;
 }
@@ -917,18 +850,11 @@ static void rv3029_unregister_driver(void)
 
 static int rv3049_probe(struct spi_device *spi)
 {
-       static const struct regmap_config config = {
-               .reg_bits = 8,
-               .val_bits = 8,
-       };
        struct regmap *regmap;
 
        regmap = devm_regmap_init_spi(spi, &config);
-       if (IS_ERR(regmap)) {
-               dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
-                       __func__, PTR_ERR(regmap));
+       if (IS_ERR(regmap))
                return PTR_ERR(regmap);
-       }
 
        return rv3029_probe(&spi->dev, regmap, spi->irq, "rv3049");
 }
@@ -940,24 +866,24 @@ static struct spi_driver rv3049_driver = {
        .probe   = rv3049_probe,
 };
 
-static int rv3049_register_driver(void)
+static int __init rv3049_register_driver(void)
 {
        return spi_register_driver(&rv3049_driver);
 }
 
-static void rv3049_unregister_driver(void)
+static void __exit rv3049_unregister_driver(void)
 {
        spi_unregister_driver(&rv3049_driver);
 }
 
 #else
 
-static int rv3049_register_driver(void)
+static int __init rv3049_register_driver(void)
 {
        return 0;
 }
 
-static void rv3049_unregister_driver(void)
+static void __exit rv3049_unregister_driver(void)
 {
 }
 
@@ -968,16 +894,12 @@ static int __init rv30x9_init(void)
        int ret;
 
        ret = rv3029_register_driver();
-       if (ret) {
-               pr_err("Failed to register rv3029 driver: %d\n", ret);
+       if (ret)
                return ret;
-       }
 
        ret = rv3049_register_driver();
-       if (ret) {
-               pr_err("Failed to register rv3049 driver: %d\n", ret);
+       if (ret)
                rv3029_unregister_driver();
-       }
 
        return ret;
 }
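
The rv3029 conversion above switches to a shared regmap_config whose rd_table/wr_table mark the chip's register holes as inaccessible. As a rough, self-contained sketch of that mechanism (all names below are illustrative, not taken from the driver), the regmap core refuses accesses that fall into a no_ranges window before they ever reach the I2C bus:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range example_holes[] = {
        regmap_reg_range(0x05, 0x07),   /* unimplemented registers */
};

static const struct regmap_access_table example_access = {
        .no_ranges   = example_holes,
        .n_no_ranges = ARRAY_SIZE(example_holes),
};

static const struct regmap_config example_cfg = {
        .reg_bits     = 8,
        .val_bits     = 8,
        .rd_table     = &example_access,   /* reads into holes are refused */
        .wr_table     = &example_access,   /* writes into holes are refused */
        .max_register = 0x3f,
};

static int example_setup(struct i2c_client *client)
{
        unsigned int val;
        struct regmap *map = devm_regmap_init_i2c(client, &example_cfg);

        if (IS_ERR(map))
                return PTR_ERR(map);

        /* 0x05 lies in a no_ranges window: the core rejects the access
         * (typically -EIO) without touching the bus.
         */
        return regmap_read(map, 0x05, &val);
}
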
index 4960f0a..93c3a6b 100644 (file)
@@ -411,6 +411,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct rv8803_data *rv8803 = dev_get_drvdata(dev);
+       unsigned int vl = 0;
        int flags, ret = 0;
 
        switch (cmd) {
@@ -419,18 +420,15 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
                if (flags < 0)
                        return flags;
 
-               if (flags & RV8803_FLAG_V1F)
+               if (flags & RV8803_FLAG_V1F) {
                        dev_warn(&client->dev, "Voltage low, temperature compensation stopped.\n");
+                       vl = RTC_VL_ACCURACY_LOW;
+               }
 
                if (flags & RV8803_FLAG_V2F)
-                       dev_warn(&client->dev, "Voltage low, data loss detected.\n");
-
-               flags &= RV8803_FLAG_V1F | RV8803_FLAG_V2F;
+                       vl |= RTC_VL_DATA_INVALID;
 
-               if (copy_to_user((void __user *)arg, &flags, sizeof(int)))
-                       return -EFAULT;
-
-               return 0;
+               return put_user(vl, (unsigned int __user *)arg);
 
        case RTC_VL_CLR:
                mutex_lock(&rv8803->flags_lock);
@@ -440,7 +438,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
                        return flags;
                }
 
-               flags &= ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F);
+               flags &= ~RV8803_FLAG_V1F;
                ret = rv8803_write_reg(client, RV8803_FLAG, flags);
                mutex_unlock(&rv8803->flags_lock);
                if (ret)
index 8102469..fe01015 100644 (file)
@@ -389,9 +389,8 @@ static int rx8010_alarm_irq_enable(struct device *dev,
 
 static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct rx8010_data *rx8010 = dev_get_drvdata(dev);
-       int ret, tmp;
+       int tmp;
        int flagreg;
 
        switch (cmd) {
@@ -400,24 +399,8 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
                if (flagreg < 0)
                        return flagreg;
 
-               tmp = !!(flagreg & RX8010_FLAG_VLF);
-               if (copy_to_user((void __user *)arg, &tmp, sizeof(int)))
-                       return -EFAULT;
-
-               return 0;
-
-       case RTC_VL_CLR:
-               flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG);
-               if (flagreg < 0) {
-                       return flagreg;
-               }
-
-               flagreg &= ~RX8010_FLAG_VLF;
-               ret = i2c_smbus_write_byte_data(client, RX8010_FLAG, flagreg);
-               if (ret < 0)
-                       return ret;
-
-               return 0;
+               tmp = flagreg & RX8010_FLAG_VLF ? RTC_VL_DATA_INVALID : 0;
+               return put_user(tmp, (unsigned int __user *)arg);
 
        default:
                return -ENOIOCTLCMD;
@@ -482,7 +465,7 @@ static int rx8010_probe(struct i2c_client *client,
 
        rx8010->rtc->max_user_freq = 1;
 
-       return err;
+       return 0;
 }
 
 static struct i2c_driver rx8010_driver = {
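
Both RTC_VL_READ hunks above stop copying raw chip flags to userspace and instead report the generic RTC_VL_* bits through put_user(). A minimal userspace sketch of how a caller would consume this, assuming the device is reachable as /dev/rtc0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
        unsigned int vl = 0;
        int fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, RTC_VL_READ, &vl) == 0) {
                if (vl & RTC_VL_DATA_INVALID)
                        printf("voltage dropped, date/time may be invalid\n");
                if (vl & RTC_VL_ACCURACY_LOW)
                        printf("voltage low, clock accuracy is degraded\n");
        }
        close(fd);
        return 0;
}
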
index b9bda10..a24f858 100644 (file)
@@ -67,7 +67,6 @@ static const struct i2c_device_id rx8025_id[] = {
 MODULE_DEVICE_TABLE(i2c, rx8025_id);
 
 struct rx8025_data {
-       struct i2c_client *client;
        struct rtc_device *rtc;
        u8 ctrl1;
 };
@@ -103,10 +102,10 @@ static s32 rx8025_write_regs(const struct i2c_client *client,
 
 static int rx8025_check_validity(struct device *dev)
 {
-       struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+       struct i2c_client *client = to_i2c_client(dev);
        int ctrl2;
 
-       ctrl2 = rx8025_read_reg(rx8025->client, RX8025_REG_CTRL2);
+       ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
        if (ctrl2 < 0)
                return ctrl2;
 
@@ -178,6 +177,7 @@ out:
 
 static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        struct rx8025_data *rx8025 = dev_get_drvdata(dev);
        u8 date[7];
        int err;
@@ -186,7 +186,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
        if (err)
                return err;
 
-       err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+       err = rx8025_read_regs(client, RX8025_REG_SEC, 7, date);
        if (err)
                return err;
 
@@ -211,6 +211,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
 
 static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        struct rx8025_data *rx8025 = dev_get_drvdata(dev);
        u8 date[7];
        int ret;
@@ -237,11 +238,11 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
 
        dev_dbg(dev, "%s: write %7ph\n", __func__, date);
 
-       ret = rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+       ret = rx8025_write_regs(client, RX8025_REG_SEC, 7, date);
        if (ret < 0)
                return ret;
 
-       return rx8025_reset_validity(rx8025->client);
+       return rx8025_reset_validity(client);
 }
 
 static int rx8025_init_client(struct i2c_client *client)
@@ -251,7 +252,7 @@ static int rx8025_init_client(struct i2c_client *client)
        int need_clear = 0;
        int err;
 
-       err = rx8025_read_regs(rx8025->client, RX8025_REG_CTRL1, 2, ctrl);
+       err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
        if (err)
                goto out;
 
@@ -280,8 +281,8 @@ out:
 /* Alarm support */
 static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        struct rx8025_data *rx8025 = dev_get_drvdata(dev);
-       struct i2c_client *client = rx8025->client;
        u8 ald[2];
        int ctrl2, err;
 
@@ -347,18 +348,18 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 
        if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) {
                rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
-               err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+               err = rx8025_write_reg(client, RX8025_REG_CTRL1,
                                       rx8025->ctrl1);
                if (err)
                        return err;
        }
-       err = rx8025_write_regs(rx8025->client, RX8025_REG_ALDMIN, 2, ald);
+       err = rx8025_write_regs(client, RX8025_REG_ALDMIN, 2, ald);
        if (err)
                return err;
 
        if (t->enabled) {
                rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE;
-               err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+               err = rx8025_write_reg(client, RX8025_REG_CTRL1,
                                       rx8025->ctrl1);
                if (err)
                        return err;
@@ -369,6 +370,7 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 
 static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        struct rx8025_data *rx8025 = dev_get_drvdata(dev);
        u8 ctrl1;
        int err;
@@ -381,7 +383,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
        if (ctrl1 != rx8025->ctrl1) {
                rx8025->ctrl1 = ctrl1;
-               err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+               err = rx8025_write_reg(client, RX8025_REG_CTRL1,
                                       rx8025->ctrl1);
                if (err)
                        return err;
@@ -516,7 +518,6 @@ static int rx8025_probe(struct i2c_client *client,
        if (!rx8025)
                return -ENOMEM;
 
-       rx8025->client = client;
        i2c_set_clientdata(client, rx8025);
 
        err = rx8025_init_client(client);
index 781cabb..d774aa1 100644 (file)
@@ -897,8 +897,11 @@ static int stm32_rtc_resume(struct device *dev)
        }
 
        ret = stm32_rtc_wait_sync(rtc);
-       if (ret < 0)
+       if (ret < 0) {
+               if (rtc->data->has_pclk)
+                       clk_disable_unprepare(rtc->pclk);
                return ret;
+       }
 
        if (device_may_wakeup(dev))
                return disable_irq_wake(rtc->irq_alarm);
index 859d901..e39af2d 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/mfd/tps6586x.h>
 #include <linux/module.h>
@@ -267,6 +268,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
        rtc->rtc->start_secs = mktime64(2009, 1, 1, 0, 0, 0);
        rtc->rtc->set_start_time = true;
 
+       irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN);
+
        ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
                                tps6586x_rtc_irq,
                                IRQF_ONESHOT,
@@ -276,7 +279,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
                                rtc->irq, ret);
                goto fail_rtc_register;
        }
-       disable_irq(rtc->irq);
 
        ret = rtc_register_device(rtc->rtc);
        if (ret)
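
The tps6586x change above replaces the request-then-disable_irq() sequence with IRQ_NOAUTOEN, so the interrupt line stays masked from the moment it is requested until the driver explicitly enables it. A generic sketch of that pattern (names are illustrative, not from the driver):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t example_irq(int irq, void *data)
{
        /* handler body elided */
        return IRQ_HANDLED;
}

static int example_setup_irq(struct device *dev, int irq, void *data)
{
        int ret;

        /* keep the line masked across the request ... */
        irq_set_status_flags(irq, IRQ_NOAUTOEN);

        ret = devm_request_threaded_irq(dev, irq, NULL, example_irq,
                                        IRQF_ONESHOT, "example", data);
        if (ret)
                return ret;

        /* ... and unmask it only once the device is ready for it */
        enable_irq(irq);
        return 0;
}
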
index 5396905..5786866 100644 (file)
@@ -94,7 +94,7 @@ static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
                 * RTC has updated the CURRENT_TIME with the time written into
                 * SET_TIME_WRITE register.
                 */
-               rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_CUR_TM), tm);
+               read_time = readl(xrtcdev->reg_base + RTC_CUR_TM);
        } else {
                /*
                 * Time written in SET_TIME_WRITE has not yet updated into
@@ -104,8 +104,8 @@ static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
                 * reading.
                 */
                read_time = readl(xrtcdev->reg_base + RTC_SET_TM_RD) - 1;
-               rtc_time64_to_tm(read_time, tm);
        }
+       rtc_time64_to_tm(read_time, tm);
 
        return 0;
 }
index 8d4d69e..62a859e 100644 (file)
@@ -320,13 +320,12 @@ out_error:
 #endif                         /* CONFIG_DASD_PROFILE */
 }
 
-static const struct file_operations dasd_stats_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = dasd_stats_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = dasd_stats_proc_write,
+static const struct proc_ops dasd_stats_proc_ops = {
+       .proc_open      = dasd_stats_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = dasd_stats_proc_write,
 };
 
 /*
@@ -347,7 +346,7 @@ dasd_proc_init(void)
        dasd_statistics_entry = proc_create("statistics",
                                            S_IFREG | S_IRUGO | S_IWUSR,
                                            dasd_proc_root_entry,
-                                           &dasd_stats_proc_fops);
+                                           &dasd_stats_proc_ops);
        if (!dasd_statistics_entry)
                goto out_nostatistics;
        return 0;
index 2a3f874..da642e8 100644 (file)
@@ -398,12 +398,12 @@ cio_ignore_proc_open(struct inode *inode, struct file *file)
                                sizeof(struct ccwdev_iter));
 }
 
-static const struct file_operations cio_ignore_proc_fops = {
-       .open    = cio_ignore_proc_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release_private,
-       .write   = cio_ignore_write,
+static const struct proc_ops cio_ignore_proc_ops = {
+       .proc_open      = cio_ignore_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release_private,
+       .proc_write     = cio_ignore_write,
 };
 
 static int
@@ -412,7 +412,7 @@ cio_ignore_proc_init (void)
        struct proc_dir_entry *entry;
 
        entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
-                           &cio_ignore_proc_fops);
+                           &cio_ignore_proc_ops);
        if (!entry)
                return -ENOENT;
        return 0;
index 8318504..94edbb3 100644 (file)
@@ -1372,18 +1372,17 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
        return ret ? ret : count;
 }
 
-static const struct file_operations cio_settle_proc_fops = {
-       .open = nonseekable_open,
-       .write = cio_settle_write,
-       .llseek = no_llseek,
+static const struct proc_ops cio_settle_proc_ops = {
+       .proc_open      = nonseekable_open,
+       .proc_write     = cio_settle_write,
+       .proc_lseek     = no_llseek,
 };
 
 static int __init cio_settle_init(void)
 {
        struct proc_dir_entry *entry;
 
-       entry = proc_create("cio_settle", S_IWUSR, NULL,
-                           &cio_settle_proc_fops);
+       entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
        if (!entry)
                return -ENOMEM;
        return 0;
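
The three s390 hunks above are part of the tree-wide switch from struct file_operations to the dedicated struct proc_ops for procfs entries. A minimal single_open()-style module showing the converted shape (illustrative names, not from the s390 code):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

static const struct proc_ops example_proc_ops = {
        .proc_open    = example_open,
        .proc_read    = seq_read,
        .proc_lseek   = seq_lseek,
        .proc_release = single_release,
};

static int __init example_init(void)
{
        return proc_create("example", 0444, NULL, &example_proc_ops) ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
        remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
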
index 52aa95c..22d2db6 100644 (file)
@@ -7,7 +7,8 @@ ap-objs := ap_bus.o ap_card.o ap_queue.o
 obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
 # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
 zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
-zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt_ccamisc.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
 obj-$(CONFIG_ZCRYPT) += zcrypt.o
 # adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
index d78d776..71dae64 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("IBM Corporation");
@@ -71,6 +72,17 @@ struct protaeskeytoken {
        u8  protkey[MAXPROTKEYSIZE]; /* the protected key blob */
 } __packed;
 
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+       u8  type;        /* 0x00 for PAES specific key tokens */
+       u8  res0[3];
+       u8  version;     /* 0x02 for clear AES key token */
+       u8  res1[3];
+       u32 keytype;     /* key type, one of the PKEY_KEYTYPE values */
+       u32 len;         /* bytes actually stored in clearkey[] */
+       u8  clearkey[0]; /* clear key value */
+} __packed;
+
 /*
  * Create a protected key from a clear key value.
  */
@@ -172,6 +184,72 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
        return rc;
 }
 
+/*
+ * Construct EP11 key with given clear key value.
+ */
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+                           u8 *keybuf, size_t *keybuflen)
+{
+       int i, rc;
+       u16 card, dom;
+       u32 nr_apqns, *apqns = NULL;
+
+       /* build a list of apqns suitable for ep11 keys with cpacf support */
+       rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+                           ZCRYPT_CEX7, EP11_API_V, NULL);
+       if (rc)
+               goto out;
+
+       /* go through the list of apqns and try to build an ep11 key */
+       for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+               card = apqns[i] >> 16;
+               dom = apqns[i] & 0xFFFF;
+               rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+                                     0, clrkey, keybuf, keybuflen);
+               if (rc == 0)
+                       break;
+       }
+
+out:
+       kfree(apqns);
+       if (rc)
+               DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+       return rc;
+}
+
+/*
+ * Find card and transform EP11 secure key into protected key.
+ */
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+       int i, rc;
+       u16 card, dom;
+       u32 nr_apqns, *apqns = NULL;
+       struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+       /* build a list of apqns suitable for this key */
+       rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+                           ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+       if (rc)
+               goto out;
+
+       /* go through the list of apqns and try to derive a pkey */
+       for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+               card = apqns[i] >> 16;
+               dom = apqns[i] & 0xFFFF;
+               rc = ep11_key2protkey(card, dom, key, kb->head.len,
+                                     pkey->protkey, &pkey->len, &pkey->type);
+               if (rc == 0)
+                       break;
+       }
+
+out:
+       kfree(apqns);
+       if (rc)
+               DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+       return rc;
+}
+
 /*
  * Verify key and give back some info about the key.
  */
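
Both helpers above iterate APQN lists returned by ep11_findcard2(): each 32-bit entry packs the adapter (card) number in the upper 16 bits and the usage domain in the lower 16 bits, which is exactly what the card/dom decode in the loops does. A tiny standalone illustration of that packing:

#include <stdint.h>
#include <stdio.h>

/* pack card and domain into one APQN word, the layout the loops above decode */
static uint32_t mk_apqn(uint16_t card, uint16_t dom)
{
        return ((uint32_t)card << 16) | dom;
}

int main(void)
{
        uint32_t apqn = mk_apqn(0x0004, 0x002a);

        printf("card=%u dom=%u\n", apqn >> 16, apqn & 0xFFFF);
        return 0;
}
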
@@ -305,26 +383,90 @@ static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
 static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
                               struct pkey_protkey *protkey)
 {
+       int rc = -EINVAL;
+       u8 *tmpbuf = NULL;
        struct keytoken_header *hdr = (struct keytoken_header *)key;
-       struct protaeskeytoken *t;
 
        switch (hdr->version) {
-       case TOKVER_PROTECTED_KEY:
-               if (keylen != sizeof(struct protaeskeytoken))
-                       return -EINVAL;
+       case TOKVER_PROTECTED_KEY: {
+               struct protaeskeytoken *t;
 
+               if (keylen != sizeof(struct protaeskeytoken))
+                       goto out;
                t = (struct protaeskeytoken *)key;
                protkey->len = t->len;
                protkey->type = t->keytype;
                memcpy(protkey->protkey, t->protkey,
                       sizeof(protkey->protkey));
-
-               return pkey_verifyprotkey(protkey);
+               rc = pkey_verifyprotkey(protkey);
+               break;
+       }
+       case TOKVER_CLEAR_KEY: {
+               struct clearaeskeytoken *t;
+               struct pkey_clrkey ckey;
+               union u_tmpbuf {
+                       u8 skey[SECKEYBLOBSIZE];
+                       u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+               };
+               size_t tmpbuflen = sizeof(union u_tmpbuf);
+
+               if (keylen < sizeof(struct clearaeskeytoken))
+                       goto out;
+               t = (struct clearaeskeytoken *)key;
+               if (keylen != sizeof(*t) + t->len)
+                       goto out;
+               if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+                   || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+                   || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+                       memcpy(ckey.clrkey, t->clearkey, t->len);
+               else
+                       goto out;
+               /* alloc temp key buffer space */
+               tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+               if (!tmpbuf) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               /* try direct way with the PCKMO instruction */
+               rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+               if (rc == 0)
+                       break;
+               /* PCKMO failed, so try the CCA secure key way */
+               rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+                                   ckey.clrkey, tmpbuf);
+               if (rc == 0)
+                       rc = pkey_skey2pkey(tmpbuf, protkey);
+               if (rc == 0)
+                       break;
+               /* if the CCA way also failed, let's try via EP11 */
+               rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+                                     tmpbuf, &tmpbuflen);
+               if (rc == 0)
+                       rc = pkey_ep11key2pkey(tmpbuf, protkey);
+               /* now we should really have a protected key */
+               DEBUG_ERR("%s unable to build protected key from clear",
+                         __func__);
+               break;
+       }
+       case TOKVER_EP11_AES: {
+               if (keylen < MINEP11AESKEYBLOBSIZE)
+                       goto out;
+               /* check ep11 key for exportable as protected key */
+               rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+               if (rc)
+                       goto out;
+               rc = pkey_ep11key2pkey(key, protkey);
+               break;
+       }
        default:
                DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
                          __func__, hdr->version);
-               return -EINVAL;
+               rc = -EINVAL;
        }
+
+out:
+       kfree(tmpbuf);
+       return rc;
 }
 
 /*
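
The new TOKVER_CLEAR_KEY branch above only accepts blobs laid out like struct clearaeskeytoken, with keylen equal to the header size plus t->len and t->len matching the AES key size. A userspace-side sketch of building such a blob (the struct is redeclared here because the kernel one is not exported; PKEY_KEYTYPE_AES_256 is assumed to come from the installed <asm/pkey.h> uapi header):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <asm/pkey.h>           /* PKEY_KEYTYPE_AES_256 */

/* userspace mirror of the kernel's clear key token (type 0x00, version 0x02) */
struct clearaeskeytoken {
        uint8_t  type;
        uint8_t  res0[3];
        uint8_t  version;
        uint8_t  res1[3];
        uint32_t keytype;
        uint32_t len;
        uint8_t  clearkey[];
} __attribute__((packed));

/* build a TOKVER_CLEAR_KEY blob around a 32 byte AES-256 clear key value */
static void *build_clear_key_token(const uint8_t key[32], size_t *blob_len)
{
        struct clearaeskeytoken *t;

        *blob_len = sizeof(*t) + 32;    /* keylen check: sizeof(*t) + t->len */
        t = calloc(1, *blob_len);
        if (!t)
                return NULL;
        t->type = 0x00;                 /* TOKTYPE_NON_CCA */
        t->version = 0x02;              /* TOKVER_CLEAR_KEY */
        t->keytype = PKEY_KEYTYPE_AES_256;
        t->len = 32;
        memcpy(t->clearkey, key, 32);
        return t;                       /* hand to the pkey ioctl interface */
}
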
@@ -403,6 +545,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
                if (*keybufsize < SECKEYBLOBSIZE)
                        return -EINVAL;
                break;
+       case PKEY_TYPE_EP11:
+               if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+                       return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
@@ -419,7 +565,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
        for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
                card = apqns[i].card;
                dom = apqns[i].domain;
-               if (ktype == PKEY_TYPE_CCA_DATA) {
+               if (ktype == PKEY_TYPE_EP11) {
+                       rc = ep11_genaeskey(card, dom, ksize, kflags,
+                                           keybuf, keybufsize);
+               } else if (ktype == PKEY_TYPE_CCA_DATA) {
                        rc = cca_genseckey(card, dom, ksize, keybuf);
                        *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
                } else /* TOKVER_CCA_VLSC */
@@ -450,6 +599,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
                if (*keybufsize < SECKEYBLOBSIZE)
                        return -EINVAL;
                break;
+       case PKEY_TYPE_EP11:
+               if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+                       return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
@@ -466,7 +619,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
        for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
                card = apqns[i].card;
                dom = apqns[i].domain;
-               if (ktype == PKEY_TYPE_CCA_DATA) {
+               if (ktype == PKEY_TYPE_EP11) {
+                       rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+                                             clrkey, keybuf, keybufsize);
+               } else if (ktype == PKEY_TYPE_CCA_DATA) {
                        rc = cca_clr2seckey(card, dom, ksize,
                                            clrkey, keybuf);
                        *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
@@ -489,11 +645,11 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
        u32 _nr_apqns, *_apqns = NULL;
        struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-       if (keylen < sizeof(struct keytoken_header) ||
-           hdr->type != TOKTYPE_CCA_INTERNAL)
+       if (keylen < sizeof(struct keytoken_header))
                return -EINVAL;
 
-       if (hdr->version == TOKVER_CCA_AES) {
+       if (hdr->type == TOKTYPE_CCA_INTERNAL
+           && hdr->version == TOKVER_CCA_AES) {
                struct secaeskeytoken *t = (struct secaeskeytoken *)key;
 
                rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
@@ -521,7 +677,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
                *cardnr = ((struct pkey_apqn *)_apqns)->card;
                *domain = ((struct pkey_apqn *)_apqns)->domain;
 
-       } else if (hdr->version == TOKVER_CCA_VLSC) {
+       } else if (hdr->type == TOKTYPE_CCA_INTERNAL
+                  && hdr->version == TOKVER_CCA_VLSC) {
                struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
                rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
@@ -556,6 +713,29 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
                *cardnr = ((struct pkey_apqn *)_apqns)->card;
                *domain = ((struct pkey_apqn *)_apqns)->domain;
 
+       } else if (hdr->type == TOKTYPE_NON_CCA
+                  && hdr->version == TOKVER_EP11_AES) {
+               struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+               rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+               if (rc)
+                       goto out;
+               if (ktype)
+                       *ktype = PKEY_TYPE_EP11;
+               if (ksize)
+                       *ksize = kb->head.keybitlen;
+
+               rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+                                   ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+               if (rc)
+                       goto out;
+
+               if (flags)
+                       *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+               *cardnr = ((struct pkey_apqn *)_apqns)->card;
+               *domain = ((struct pkey_apqn *)_apqns)->domain;
+
        } else
                rc = -EINVAL;
 
@@ -578,30 +758,32 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
        if (keylen < sizeof(struct keytoken_header))
                return -EINVAL;
 
-       switch (hdr->type) {
-       case TOKTYPE_NON_CCA:
-               return pkey_nonccatok2pkey(key, keylen, pkey);
-       case TOKTYPE_CCA_INTERNAL:
-               switch (hdr->version) {
-               case TOKVER_CCA_AES:
+       if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+               if (hdr->version == TOKVER_CCA_AES) {
                        if (keylen != sizeof(struct secaeskeytoken))
                                return -EINVAL;
                        if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
                                return -EINVAL;
-                       break;
-               case TOKVER_CCA_VLSC:
+               } else if (hdr->version == TOKVER_CCA_VLSC) {
                        if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
                                return -EINVAL;
                        if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
                                return -EINVAL;
-                       break;
-               default:
+               } else {
                        DEBUG_ERR("%s unknown CCA internal token version %d\n",
                                  __func__, hdr->version);
                        return -EINVAL;
                }
-               break;
-       default:
+       } else if (hdr->type == TOKTYPE_NON_CCA) {
+               if (hdr->version == TOKVER_EP11_AES) {
+                       if (keylen < sizeof(struct ep11keyblob))
+                               return -EINVAL;
+                       if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1))
+                               return -EINVAL;
+               } else {
+                       return pkey_nonccatok2pkey(key, keylen, pkey);
+               }
+       } else {
                DEBUG_ERR("%s unknown/unsupported blob type %d\n",
                          __func__, hdr->type);
                return -EINVAL;
@@ -611,12 +793,21 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
        for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
                card = apqns[i].card;
                dom = apqns[i].domain;
-               if (hdr->version == TOKVER_CCA_AES)
+               if (hdr->type == TOKTYPE_CCA_INTERNAL
+                   && hdr->version == TOKVER_CCA_AES)
                        rc = cca_sec2protkey(card, dom, key, pkey->protkey,
                                             &pkey->len, &pkey->type);
-               else /* TOKVER_CCA_VLSC */
+               else if (hdr->type == TOKTYPE_CCA_INTERNAL
+                        && hdr->version == TOKVER_CCA_VLSC)
                        rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
                                                &pkey->len, &pkey->type);
+               else { /* EP11 AES secure key blob */
+                       struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+                       rc = ep11_key2protkey(card, dom, key, kb->head.len,
+                                             pkey->protkey, &pkey->len,
+                                             &pkey->type);
+               }
                if (rc == 0)
                        break;
        }
@@ -631,12 +822,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
        u32 _nr_apqns, *_apqns = NULL;
        struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-       if (keylen < sizeof(struct keytoken_header) ||
-           hdr->type != TOKTYPE_CCA_INTERNAL ||
-           flags == 0)
+       if (keylen < sizeof(struct keytoken_header) || flags == 0)
                return -EINVAL;
 
-       if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
+       if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) {
+               int minhwtype = 0, api = 0;
+               struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+               if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+                       return -EINVAL;
+               if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+                       minhwtype = ZCRYPT_CEX7;
+                       api = EP11_API_V;
+               }
+               rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+                                   minhwtype, api, kb->wkvp);
+               if (rc)
+                       goto out;
+       } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
                int minhwtype = ZCRYPT_CEX3C;
                u64 cur_mkvp = 0, old_mkvp = 0;
 
@@ -647,7 +850,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
                                cur_mkvp = t->mkvp;
                        if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
                                old_mkvp = t->mkvp;
-               } else {
+               } else if (hdr->version == TOKVER_CCA_VLSC) {
                        struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
                        minhwtype = ZCRYPT_CEX6;
@@ -655,19 +858,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
                                cur_mkvp = t->mkvp0;
                        if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
                                old_mkvp = t->mkvp0;
+               } else {
+                       /* unknown cca internal token type */
+                       return -EINVAL;
                }
                rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
                                   minhwtype, cur_mkvp, old_mkvp, 1);
                if (rc)
                        goto out;
-               if (apqns) {
-                       if (*nr_apqns < _nr_apqns)
-                               rc = -ENOSPC;
-                       else
-                               memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
-               }
-               *nr_apqns = _nr_apqns;
+       } else
+               return -EINVAL;
+
+       if (apqns) {
+               if (*nr_apqns < _nr_apqns)
+                       rc = -ENOSPC;
+               else
+                       memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
        }
+       *nr_apqns = _nr_apqns;
 
 out:
        kfree(_apqns);
@@ -695,14 +903,26 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
                                   minhwtype, cur_mkvp, old_mkvp, 1);
                if (rc)
                        goto out;
-               if (apqns) {
-                       if (*nr_apqns < _nr_apqns)
-                               rc = -ENOSPC;
-                       else
-                               memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
-               }
-               *nr_apqns = _nr_apqns;
+       } else if (ktype == PKEY_TYPE_EP11) {
+               u8 *wkvp = NULL;
+
+               if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+                       wkvp = cur_mkvp;
+               rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+                                   ZCRYPT_CEX7, EP11_API_V, wkvp);
+               if (rc)
+                       goto out;
+
+       } else
+               return -EINVAL;
+
+       if (apqns) {
+               if (*nr_apqns < _nr_apqns)
+                       rc = -ENOSPC;
+               else
+                       memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
        }
+       *nr_apqns = _nr_apqns;
 
 out:
        kfree(_apqns);
@@ -1357,8 +1577,9 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
                                            bool is_xts, char *buf, loff_t off,
                                            size_t count)
 {
-       size_t keysize;
-       int rc;
+       int i, rc, card, dom;
+       u32 nr_apqns, *apqns = NULL;
+       size_t keysize = CCACIPHERTOKENSIZE;
 
        if (off != 0 || count < CCACIPHERTOKENSIZE)
                return -EINVAL;
@@ -1366,22 +1587,31 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
                if (count < 2 * CCACIPHERTOKENSIZE)
                        return -EINVAL;
 
-       keysize = CCACIPHERTOKENSIZE;
-       rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+       /* build a list of apqns able to generate a cipher key */
+       rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+                          ZCRYPT_CEX6, 0, 0, 0);
        if (rc)
                return rc;
-       memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
 
-       if (is_xts) {
-               keysize = CCACIPHERTOKENSIZE;
-               rc = cca_gencipherkey(-1, -1, keybits, 0,
-                                     buf + CCACIPHERTOKENSIZE, &keysize);
+       memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+       /* simply try all apqns from the list */
+       for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+               card = apqns[i] >> 16;
+               dom = apqns[i] & 0xFFFF;
+               rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+               if (rc == 0)
+                       break;
+       }
                if (rc)
                        return rc;
-               memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
-                      CCACIPHERTOKENSIZE - keysize);
 
-               return 2 * CCACIPHERTOKENSIZE;
+       if (is_xts) {
+               keysize = CCACIPHERTOKENSIZE;
+               buf += CCACIPHERTOKENSIZE;
+               rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+               if (rc == 0)
+                       return 2 * CCACIPHERTOKENSIZE;
        }
 
        return CCACIPHERTOKENSIZE;
@@ -1457,10 +1687,134 @@ static struct attribute_group ccacipher_attr_group = {
        .bin_attrs = ccacipher_attrs,
 };
 
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+                                      bool is_xts, char *buf, loff_t off,
+                                      size_t count)
+{
+       int i, rc, card, dom;
+       u32 nr_apqns, *apqns = NULL;
+       size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+       if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+               return -EINVAL;
+       if (is_xts)
+               if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+                       return -EINVAL;
+
+       /* build a list of apqns able to generate a cipher key */
+       rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+                           ZCRYPT_CEX7, EP11_API_V, NULL);
+       if (rc)
+               return rc;
+
+       memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+       /* simply try all apqns from the list */
+       for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+               card = apqns[i] >> 16;
+               dom = apqns[i] & 0xFFFF;
+               rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+               if (rc == 0)
+                       break;
+       }
+       if (rc)
+               return rc;
+
+       if (is_xts) {
+               keysize = MAXEP11AESKEYBLOBSIZE;
+               buf += MAXEP11AESKEYBLOBSIZE;
+               rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+               if (rc == 0)
+                       return 2 * MAXEP11AESKEYBLOBSIZE;
+       }
+
+       return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+                                struct kobject *kobj,
+                                struct bin_attribute *attr,
+                                char *buf, loff_t off,
+                                size_t count)
+{
+       return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+                                      off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+                                struct kobject *kobj,
+                                struct bin_attribute *attr,
+                                char *buf, loff_t off,
+                                size_t count)
+{
+       return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+                                      off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+                                struct kobject *kobj,
+                                struct bin_attribute *attr,
+                                char *buf, loff_t off,
+                                size_t count)
+{
+       return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+                                      off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+                                    struct kobject *kobj,
+                                    struct bin_attribute *attr,
+                                    char *buf, loff_t off,
+                                    size_t count)
+{
+       return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+                                      off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+                                    struct kobject *kobj,
+                                    struct bin_attribute *attr,
+                                    char *buf, loff_t off,
+                                    size_t count)
+{
+       return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+                                      off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+       &bin_attr_ep11_aes_128,
+       &bin_attr_ep11_aes_192,
+       &bin_attr_ep11_aes_256,
+       &bin_attr_ep11_aes_128_xts,
+       &bin_attr_ep11_aes_256_xts,
+       NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+       .name      = "ep11",
+       .bin_attrs = ep11_attrs,
+};
+
 static const struct attribute_group *pkey_attr_groups[] = {
        &protkey_attr_group,
        &ccadata_attr_group,
        &ccacipher_attr_group,
+       &ep11_attr_group,
        NULL,
 };
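
The ep11 sysfs attributes added above generate a fresh key blob on every read and reject partial reads, so a consumer must fetch the whole padded blob (320 bytes per the comment) at offset 0 in one call. A small sketch, assuming the attribute shows up under the pkey misc device (the path below is illustrative):

#include <fcntl.h>
#include <unistd.h>

#define EP11_BLOB_SIZE 320      /* MAXEP11AESKEYBLOBSIZE, per the comment above */

int main(void)
{
        unsigned char blob[EP11_BLOB_SIZE];
        ssize_t n;
        int fd = open("/sys/devices/virtual/misc/pkey/ep11/ep11_aes_256", O_RDONLY);

        if (fd < 0)
                return 1;
        /* the whole blob must be requested in one read at offset 0 */
        n = read(fd, blob, sizeof(blob));
        close(fd);
        return n == (ssize_t)sizeof(blob) ? 0 : 1;
}
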
 
index 9157e72..a42257d 100644 (file)
@@ -36,6 +36,7 @@
 #include "zcrypt_msgtype6.h"
 #include "zcrypt_msgtype50.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 /*
  * Module description.
@@ -849,7 +850,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
                        /* check if device is online and eligible */
                        if (!zq->online ||
                            !zq->ops->send_cprb ||
-                           (tdom != (unsigned short) AUTOSELECT &&
+                           (tdom != AUTOSEL_DOM &&
                             tdom != AP_QID_QUEUE(zq->queue->qid)))
                                continue;
                        /* check if device node has admission for this queue */
@@ -874,7 +875,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 
        /* in case of auto select, provide the correct domain */
        qid = pref_zq->queue->qid;
-       if (*domain == (unsigned short) AUTOSELECT)
+       if (*domain == AUTOSEL_DOM)
                *domain = AP_QID_QUEUE(qid);
 
        rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
@@ -901,7 +902,7 @@ static bool is_desired_ep11_card(unsigned int dev_id,
                                 struct ep11_target_dev *targets)
 {
        while (target_num-- > 0) {
-               if (dev_id == targets->ap_id)
+               if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
                        return true;
                targets++;
        }
@@ -912,16 +913,19 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
                                  unsigned short target_num,
                                  struct ep11_target_dev *targets)
 {
+       int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
        while (target_num-- > 0) {
-               if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+               if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+                   (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
                        return true;
                targets++;
        }
        return false;
 }
 
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
-                                 struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
+                                  struct ep11_urb *xcrb)
 {
        struct zcrypt_card *zc, *pref_zc;
        struct zcrypt_queue *zq, *pref_zq;
@@ -1026,6 +1030,12 @@ out:
        return rc;
 }
 
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+       return _zcrypt_send_ep11_cprb(&ap_perms, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
 static long zcrypt_rng(char *buffer)
 {
        struct zcrypt_card *zc, *pref_zc;
@@ -1366,12 +1376,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
                if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
                        return -EFAULT;
                do {
-                       rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+                       rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
                } while (rc == -EAGAIN);
                /* on failure: retry once again after a requested rescan */
                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
                        do {
-                               rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+                               rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
                        } while (rc == -EAGAIN);
                if (rc)
                        ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
@@ -1885,6 +1895,7 @@ void __exit zcrypt_api_exit(void)
        zcrypt_msgtype6_exit();
        zcrypt_msgtype50_exit();
        zcrypt_ccamisc_exit();
+       zcrypt_ep11misc_exit();
        zcrypt_debug_exit();
 }
 
index d464618..599e68b 100644 (file)
@@ -140,6 +140,7 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
 long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
 int zcrypt_device_status_ext(int card, int queue,
                             struct zcrypt_device_status_ext *devstatus);
index 77b6cc7..3a9876d 100644 (file)
@@ -19,6 +19,7 @@
 
 /* For TOKTYPE_NON_CCA: */
 #define TOKVER_PROTECTED_KEY   0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY       0x02 /* Clear key token */
 
 /* For TOKTYPE_CCA_INTERNAL: */
 #define TOKVER_CCA_AES         0x04 /* CCA AES key token */
index 6fabc90..9a9d02e 100644 (file)
@@ -19,6 +19,7 @@
 #include "zcrypt_error.h"
 #include "zcrypt_cex4.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 #define CEX4A_MIN_MOD_SIZE       1     /*    8 bits    */
 #define CEX4A_MAX_MOD_SIZE_2K  256     /* 2048 bits    */
@@ -71,11 +72,11 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
 MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
 
 /*
- * CCA card addditional device attributes
+ * CCA card additional device attributes
  */
-static ssize_t serialnr_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
+static ssize_t cca_serialnr_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
 {
        struct cca_info ci;
        struct ap_card *ac = to_ap_card(dev);
@@ -88,23 +89,25 @@ static ssize_t serialnr_show(struct device *dev,
 
        return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
 }
-static DEVICE_ATTR_RO(serialnr);
+
+static struct device_attribute dev_attr_cca_serialnr =
+       __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
 
 static struct attribute *cca_card_attrs[] = {
-       &dev_attr_serialnr.attr,
+       &dev_attr_cca_serialnr.attr,
        NULL,
 };
 
-static const struct attribute_group cca_card_attr_group = {
+static const struct attribute_group cca_card_attr_grp = {
        .attrs = cca_card_attrs,
 };
 
 /*
- * CCA queue addditional device attributes
+ * CCA queue additional device attributes
  */
-static ssize_t mkvps_show(struct device *dev,
-                         struct device_attribute *attr,
-                         char *buf)
+static ssize_t cca_mkvps_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
 {
        int n = 0;
        struct cca_info ci;
@@ -138,17 +141,233 @@ static ssize_t mkvps_show(struct device *dev,
 
        return n;
 }
-static DEVICE_ATTR_RO(mkvps);
+
+static struct device_attribute dev_attr_cca_mkvps =
+       __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
 
 static struct attribute *cca_queue_attrs[] = {
-       &dev_attr_mkvps.attr,
+       &dev_attr_cca_mkvps.attr,
        NULL,
 };
 
-static const struct attribute_group cca_queue_attr_group = {
+static const struct attribute_group cca_queue_attr_grp = {
        .attrs = cca_queue_attrs,
 };
 
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       struct ep11_card_info ci;
+       struct ap_card *ac = to_ap_card(dev);
+       struct zcrypt_card *zc = ac->private;
+
+       memset(&ci, 0, sizeof(ci));
+
+       ep11_get_card_info(ac->id, &ci, zc->online);
+
+       if (ci.API_ord_nr > 0)
+               return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+       else
+               return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+       __ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct ep11_card_info ci;
+       struct ap_card *ac = to_ap_card(dev);
+       struct zcrypt_card *zc = ac->private;
+
+       memset(&ci, 0, sizeof(ci));
+
+       ep11_get_card_info(ac->id, &ci, zc->online);
+
+       if (ci.FW_version > 0)
+               return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+                               (int)(ci.FW_version >> 8),
+                               (int)(ci.FW_version & 0xFF));
+       else
+               return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+       __ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       struct ep11_card_info ci;
+       struct ap_card *ac = to_ap_card(dev);
+       struct zcrypt_card *zc = ac->private;
+
+       memset(&ci, 0, sizeof(ci));
+
+       ep11_get_card_info(ac->id, &ci, zc->online);
+
+       if (ci.serial[0])
+               return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+       else
+               return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+       __ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+       int         mode_bit;
+       const char *mode_txt;
+} ep11_op_modes[] = {
+       { 0, "FIPS2009" },
+       { 1, "BSI2009" },
+       { 2, "FIPS2011" },
+       { 3, "BSI2011" },
+       { 6, "BSICC2017" },
+       { 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       int i, n = 0;
+       struct ep11_card_info ci;
+       struct ap_card *ac = to_ap_card(dev);
+       struct zcrypt_card *zc = ac->private;
+
+       memset(&ci, 0, sizeof(ci));
+
+       ep11_get_card_info(ac->id, &ci, zc->online);
+
+       for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+               if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+                       if (n > 0)
+                               buf[n++] = ' ';
+                       n += snprintf(buf + n, PAGE_SIZE - n,
+                                     "%s", ep11_op_modes[i].mode_txt);
+               }
+       }
+       n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+       return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+       __ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+       &dev_attr_ep11_api_ordinalnr.attr,
+       &dev_attr_ep11_fw_version.attr,
+       &dev_attr_ep11_serialnr.attr,
+       &dev_attr_ep11_card_op_modes.attr,
+       NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+       .attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       int n = 0;
+       struct ep11_domain_info di;
+       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+       static const char * const cwk_state[] = { "invalid", "valid" };
+       static const char * const nwk_state[] = { "empty", "uncommitted",
+                                                 "committed" };
+
+       memset(&di, 0, sizeof(di));
+
+       if (zq->online)
+               ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+                                    AP_QID_QUEUE(zq->queue->qid),
+                                    &di);
+
+       if (di.cur_wk_state == '0') {
+               n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+                            cwk_state[di.cur_wk_state - '0']);
+       } else if (di.cur_wk_state == '1') {
+               n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+                            cwk_state[di.cur_wk_state - '0']);
+               bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+               n += 2 * sizeof(di.cur_wkvp);
+               n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+       } else
+               n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+       if (di.new_wk_state == '0') {
+               n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+                             nwk_state[di.new_wk_state - '0']);
+       } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+               n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+                             nwk_state[di.new_wk_state - '0']);
+               bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+               n += 2 * sizeof(di.new_wkvp);
+               n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+       } else
+               n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+       return n;
+}
+
+static struct device_attribute dev_attr_ep11_mkvps =
+       __ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       int i, n = 0;
+       struct ep11_domain_info di;
+       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+       memset(&di, 0, sizeof(di));
+
+       if (zq->online)
+               ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+                                    AP_QID_QUEUE(zq->queue->qid),
+                                    &di);
+
+       for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+               if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+                       if (n > 0)
+                               buf[n++] = ' ';
+                       n += snprintf(buf + n, PAGE_SIZE - n,
+                                     "%s", ep11_op_modes[i].mode_txt);
+               }
+       }
+       n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+       return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+       __ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+       &dev_attr_ep11_mkvps.attr,
+       &dev_attr_ep11_queue_op_modes.attr,
+       NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+       .attrs = ep11_queue_attrs,
+};
+
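For illustration only, a minimal userspace sketch (not part of this patch) that dumps one of the new EP11 queue attributes; the sysfs path follows the usual AP bus layout and the card/queue ids are made-up examples:

#include <stdio.h>

int main(void)
{
	char line[256];
	/* path assumed from the usual AP bus sysfs layout, ids are examples */
	FILE *f = fopen("/sys/devices/ap/card01/01.0005/mkvps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "WK CUR: valid 0x<wkvp hex>" */
	fclose(f);
	return 0;
}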
 /**
  * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
  * accepts the AP device since the bus_match already checked
@@ -313,7 +532,12 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
                rc = sysfs_create_group(&ap_dev->device.kobj,
-                                       &cca_card_attr_group);
+                                       &cca_card_attr_grp);
+               if (rc)
+                       zcrypt_card_unregister(zc);
+       } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+               rc = sysfs_create_group(&ap_dev->device.kobj,
+                                       &ep11_card_attr_grp);
                if (rc)
                        zcrypt_card_unregister(zc);
        }
@@ -332,7 +556,9 @@ static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
        struct zcrypt_card *zc = ac->private;
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
-               sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_group);
+               sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+       else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+               sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
        if (zc)
                zcrypt_card_unregister(zc);
 }
@@ -394,7 +620,12 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
                rc = sysfs_create_group(&ap_dev->device.kobj,
-                                       &cca_queue_attr_group);
+                                       &cca_queue_attr_grp);
+               if (rc)
+                       zcrypt_queue_unregister(zq);
+       } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+               rc = sysfs_create_group(&ap_dev->device.kobj,
+                                       &ep11_queue_attr_grp);
                if (rc)
                        zcrypt_queue_unregister(zq);
        }
@@ -413,7 +644,9 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
        struct zcrypt_queue *zq = aq->private;
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
-               sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_group);
+               sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+       else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+               sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
        if (zq)
                zcrypt_queue_unregister(zq);
 }
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644 (file)
index 0000000..d4caf46
--- /dev/null
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+                              0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+       struct list_head list;
+       u16 cardnr;
+       struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+       int rc = -ENOENT;
+       struct card_list_entry *ptr;
+
+       spin_lock_bh(&card_list_lock);
+       list_for_each_entry(ptr, &card_list, list) {
+               if (ptr->cardnr == cardnr) {
+                       memcpy(ci, &ptr->info, sizeof(*ci));
+                       rc = 0;
+                       break;
+               }
+       }
+       spin_unlock_bh(&card_list_lock);
+
+       return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+       int found = 0;
+       struct card_list_entry *ptr;
+
+       spin_lock_bh(&card_list_lock);
+       list_for_each_entry(ptr, &card_list, list) {
+               if (ptr->cardnr == cardnr) {
+                       memcpy(&ptr->info, ci, sizeof(*ci));
+                       found = 1;
+                       break;
+               }
+       }
+       if (!found) {
+               ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+               if (!ptr) {
+                       spin_unlock_bh(&card_list_lock);
+                       return;
+               }
+               ptr->cardnr = cardnr;
+               memcpy(&ptr->info, ci, sizeof(*ci));
+               list_add(&ptr->list, &card_list);
+       }
+       spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+       struct card_list_entry *ptr;
+
+       spin_lock_bh(&card_list_lock);
+       list_for_each_entry(ptr, &card_list, list) {
+               if (ptr->cardnr == cardnr) {
+                       list_del(&ptr->list);
+                       kfree(ptr);
+                       break;
+               }
+       }
+       spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+       struct card_list_entry *ptr, *pnext;
+
+       spin_lock_bh(&card_list_lock);
+       list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+               list_del(&ptr->list);
+               kfree(ptr);
+       }
+       spin_unlock_bh(&card_list_lock);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+                         const u8 *key, int keybitsize,
+                         int checkcpacfexport)
+{
+       struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+       if (kb->head.type != TOKTYPE_NON_CCA) {
+               if (dbg)
+                       DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+                           __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+               return -EINVAL;
+       }
+       if (kb->head.version != TOKVER_EP11_AES) {
+               if (dbg)
+                       DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+                           __func__, (int) kb->head.version, TOKVER_EP11_AES);
+               return -EINVAL;
+       }
+       if (kb->version != EP11_STRUCT_MAGIC) {
+               if (dbg)
+                       DBF("%s key check failed, magic 0x%04x != 0x%04x\n",
+                           __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+               return -EINVAL;
+       }
+       switch (kb->head.keybitlen) {
+       case 128:
+       case 192:
+       case 256:
+               break;
+       default:
+               if (dbg)
+                       DBF("%s key check failed, keybitlen %d invalid\n",
+                           __func__, (int) kb->head.keybitlen);
+               return -EINVAL;
+       }
+       if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) {
+               if (dbg)
+                       DBF("%s key check failed, keybitsize %d\n",
+                           __func__, keybitsize);
+               return -EINVAL;
+       }
+       if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+               if (dbg)
+                       DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n",
+                           __func__);
+               return -EINVAL;
+       }
+#undef DBF
+
+       return 0;
+}
+EXPORT_SYMBOL(ep11_check_aeskeyblob);
+
+/*
+ * Helper function which calls zcrypt_send_ep11_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user called within this
+ * function does in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb)
+{
+       int rc;
+       mm_segment_t old_fs = get_fs();
+
+       set_fs(KERNEL_DS);
+       rc = zcrypt_send_ep11_cprb(urb);
+       set_fs(old_fs);
+
+       return rc;
+}
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+       size_t len = sizeof(struct ep11_cprb) + payload_len;
+       struct ep11_cprb *cprb;
+
+       cprb = kmalloc(len, GFP_KERNEL);
+       if (!cprb)
+               return NULL;
+
+       memset(cprb, 0, len);
+       cprb->cprb_len = sizeof(struct ep11_cprb);
+       cprb->cprb_ver_id = 0x04;
+       memcpy(cprb->func_id, "T4", 2);
+       cprb->ret_code = 0xFFFFFFFF;
+       cprb->payload_len = payload_len;
+
+       return cprb;
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 byte.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+       ptr[0] = tag;
+       if (valuelen > 255) {
+               ptr[1] = 0x82;
+               *((u16 *)(ptr + 2)) = valuelen;
+               memcpy(ptr + 4, pvalue, valuelen);
+               return 4 + valuelen;
+       }
+       if (valuelen > 127) {
+               ptr[1] = 0x81;
+               ptr[2] = (u8) valuelen;
+               memcpy(ptr + 3, pvalue, valuelen);
+               return 3 + valuelen;
+       }
+       ptr[1] = (u8) valuelen;
+       memcpy(ptr + 2, pvalue, valuelen);
+       return 2 + valuelen;
+}
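A short sketch (illustrative only, not part of the patch) of the three DER length forms this helper emits; the return value is the total number of bytes written:

static void __maybe_unused asn1tag_write_examples(void)
{
	static u8 val[300];
	u8 buf[4 + sizeof(val)];

	/* short form:    04 64 <100 value bytes>       -> returns 102 */
	asn1tag_write(buf, 0x04, val, 100);
	/* long form (1): 04 81 C8 <200 value bytes>     -> returns 203 */
	asn1tag_write(buf, 0x04, val, 200);
	/* long form (2): 04 82 01 2C <300 value bytes>  -> returns 304 */
	asn1tag_write(buf, 0x04, val, 300);
}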
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+       u8  tag;
+       u8  lenfmt;
+       u16 len;
+       u8  func_tag;
+       u8  func_len;
+       u32 func;
+       u8  dom_tag;
+       u8  dom_len;
+       u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+                            size_t pl_size, int api, int func)
+{
+       h->tag = 0x30;
+       h->lenfmt = 0x82;
+       h->len = pl_size - 4;
+       h->func_tag = 0x04;
+       h->func_len = sizeof(u32);
+       h->func = (api << 16) + func;
+       h->dom_tag = 0x04;
+       h->dom_len = sizeof(u32);
+}
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+                           struct ep11_target_dev *t, int nt,
+                           struct ep11_cprb *req, size_t req_len,
+                           struct ep11_cprb *rep, size_t rep_len)
+{
+       u->targets = (u8 __user *) t;
+       u->targets_num = nt;
+       u->req = (u8 __user *) req;
+       u->req_len = req_len;
+       u->resp = (u8 __user *) rep;
+       u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+       int len;
+       u32 ret;
+
+       /* start tag */
+       if (*pl++ != 0x30) {
+               DEBUG_ERR("%s reply start tag mismatch\n", func);
+               return -EIO;
+       }
+
+       /* payload length format */
+       if (*pl < 127) {
+               len = *pl;
+               pl++;
+       } else if (*pl == 0x81) {
+               pl++;
+               len = *pl;
+               pl++;
+       } else if (*pl == 0x82) {
+               pl++;
+               len = *((u16 *)pl);
+               pl += 2;
+       } else {
+               DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+                         func, *pl);
+               return -EIO;
+       }
+
+       /* len should cover at least 3 fields with 32 bit value each */
+       /* len should cover at least 3 fields with a 32 bit value each */
+               DEBUG_ERR("%s reply length %d too small\n", func, len);
+               return -EIO;
+       }
+
+       /* function tag, length and value */
+       if (pl[0] != 0x04 || pl[1] != 0x04) {
+               DEBUG_ERR("%s function tag or length mismatch\n", func);
+               return -EIO;
+       }
+       pl += 6;
+
+       /* dom tag, length and value */
+       if (pl[0] != 0x04 || pl[1] != 0x04) {
+               DEBUG_ERR("%s dom tag or length mismatch\n", func);
+               return -EIO;
+       }
+       pl += 6;
+
+       /* return value tag, length and value */
+       if (pl[0] != 0x04 || pl[1] != 0x04) {
+               DEBUG_ERR("%s return value tag or length mismatch\n", func);
+               return -EIO;
+       }
+       pl += 2;
+       ret = *((u32 *)pl);
+       if (ret != 0) {
+               DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+               return -EIO;
+       }
+
+       return 0;
+}
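For reference, a sketch (illustrative only, not part of the patch) of the smallest reply payload this parser accepts; the function and domain values are made-up examples:

static const u8 __maybe_unused example_reply_pl[] = {
	0x30, 0x82, 0x00, 0x12,			/* sequence, 2 byte length = 18  */
	0x04, 0x04, 0x00, 0x01, 0x00, 0x26,	/* function field (api 1, fn 38) */
	0x04, 0x04, 0x00, 0x00, 0x00, 0x11,	/* domain field (example value)  */
	0x04, 0x04, 0x00, 0x00, 0x00, 0x00,	/* return value 0 == success     */
};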
+
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+                          size_t buflen, u8 *buf)
+{
+       struct ep11_info_req_pl {
+               struct pl_head head;
+               u8  query_type_tag;
+               u8  query_type_len;
+               u32 query_type;
+               u8  query_subtype_tag;
+               u8  query_subtype_len;
+               u32 query_subtype;
+       } __packed * req_pl;
+       struct ep11_info_rep_pl {
+               struct pl_head head;
+               u8  rc_tag;
+               u8  rc_len;
+               u32 rc;
+               u8  data_tag;
+               u8  data_lenfmt;
+               u16 data_len;
+       } __packed * rep_pl;
+       struct ep11_cprb *req = NULL, *rep = NULL;
+       struct ep11_target_dev target;
+       struct ep11_urb *urb = NULL;
+       int api = 1, rc = -ENOMEM;
+
+       /* request cprb and payload */
+       req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+       if (!req)
+               goto out;
+       req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+       prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+       req_pl->query_type_tag = 0x04;
+       req_pl->query_type_len = sizeof(u32);
+       req_pl->query_type = query_type;
+       req_pl->query_subtype_tag = 0x04;
+       req_pl->query_subtype_len = sizeof(u32);
+
+       /* reply cprb and payload */
+       rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+       if (!rep)
+               goto out;
+       rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+       /* urb and target */
+       urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+       if (!urb)
+               goto out;
+       target.ap_id = cardnr;
+       target.dom_id = domain;
+       prep_urb(urb, &target, 1,
+                req, sizeof(*req) + sizeof(*req_pl),
+                rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+       rc = _zcrypt_send_ep11_cprb(urb);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+                       __func__, (int) cardnr, (int) domain, rc);
+               goto out;
+       }
+
+       rc = check_reply_pl((u8 *)rep_pl, __func__);
+       if (rc)
+               goto out;
+       if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+               DEBUG_ERR("%s unknown reply data format\n", __func__);
+               rc = -EIO;
+               goto out;
+       }
+       if (rep_pl->data_len > buflen) {
+               DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+                         __func__);
+               rc = -ENOSPC;
+               goto out;
+       }
+
+       memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+       kfree(req);
+       kfree(rep);
+       kfree(urb);
+       return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+       int rc;
+       struct ep11_module_query_info {
+               u32 API_ord_nr;
+               u32 firmware_id;
+               u8  FW_major_vers;
+               u8  FW_minor_vers;
+               u8  CSP_major_vers;
+               u8  CSP_minor_vers;
+               u8  fwid[32];
+               u8  xcp_config_hash[32];
+               u8  CSP_config_hash[32];
+               u8  serial[16];
+               u8  module_date_time[16];
+               u64 op_mode;
+               u32 PKCS11_flags;
+               u32 ext_flags;
+               u32 domains;
+               u32 sym_state_bytes;
+               u32 digest_state_bytes;
+               u32 pin_blob_bytes;
+               u32 SPKI_bytes;
+               u32 priv_key_blob_bytes;
+               u32 sym_blob_bytes;
+               u32 max_payload_bytes;
+               u32 CP_profile_bytes;
+               u32 max_CP_index;
+       } __packed * pmqi = NULL;
+
+       rc = card_cache_fetch(card, info);
+       if (rc || verify) {
+               pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+               if (!pmqi)
+                       return -ENOMEM;
+               rc = ep11_query_info(card, AUTOSEL_DOM,
+                                    0x01 /* module info query */,
+                                    sizeof(*pmqi), (u8 *) pmqi);
+               if (rc) {
+                       if (rc == -ENODEV)
+                               card_cache_scrub(card);
+                       goto out;
+               }
+               memset(info, 0, sizeof(*info));
+               info->API_ord_nr = pmqi->API_ord_nr;
+               info->FW_version =
+                       (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+               memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+               info->op_mode = pmqi->op_mode;
+               card_cache_update(card, info);
+       }
+
+out:
+       kfree(pmqi);
+       return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
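A usage sketch (illustrative only; the card number is a made-up example): with verify == 0 a cached entry may be returned, with verify != 0 the card is always queried and the cache refreshed:

static int __maybe_unused example_card_info(void)
{
	struct ep11_card_info ci;
	int rc = ep11_get_card_info(2, &ci, 1);	/* card 2, forced re-query */

	if (!rc)
		pr_debug("EP11 API ord %u, FW %d.%d\n", ci.API_ord_nr,
			 ci.FW_version >> 8, ci.FW_version & 0xff);
	return rc;
}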
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+       int rc;
+       struct ep11_domain_query_info {
+               u32 dom_index;
+               u8  cur_WK_VP[32];
+               u8  new_WK_VP[32];
+               u32 dom_flags;
+               u64 op_mode;
+       } __packed * p_dom_info;
+
+       p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+       if (!p_dom_info)
+               return -ENOMEM;
+
+       rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+                            sizeof(*p_dom_info), (u8 *) p_dom_info);
+       if (rc)
+               goto out;
+
+       memset(info, 0, sizeof(*info));
+       info->cur_wk_state = '0';
+       info->new_wk_state = '0';
+       if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+               if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+                       info->cur_wk_state = '1';
+                       memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+               }
+               if (p_dom_info->dom_flags & 0x04 /* new wk present */
+                   || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+                       info->new_wk_state =
+                               p_dom_info->dom_flags & 0x08 ? '2' : '1';
+                       memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+               }
+       }
+       info->op_mode = p_dom_info->op_mode;
+
+out:
+       kfree(p_dom_info);
+       return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+                  u8 *keybuf, size_t *keybufsize)
+{
+       struct keygen_req_pl {
+               struct pl_head head;
+               u8  var_tag;
+               u8  var_len;
+               u32 var;
+               u8  keybytes_tag;
+               u8  keybytes_len;
+               u32 keybytes;
+               u8  mech_tag;
+               u8  mech_len;
+               u32 mech;
+               u8  attr_tag;
+               u8  attr_len;
+               u32 attr_header;
+               u32 attr_bool_mask;
+               u32 attr_bool_bits;
+               u32 attr_val_len_type;
+               u32 attr_val_len_value;
+               u8  pin_tag;
+               u8  pin_len;
+       } __packed * req_pl;
+       struct keygen_rep_pl {
+               struct pl_head head;
+               u8  rc_tag;
+               u8  rc_len;
+               u32 rc;
+               u8  data_tag;
+               u8  data_lenfmt;
+               u16 data_len;
+               u8  data[512];
+       } __packed * rep_pl;
+       struct ep11_cprb *req = NULL, *rep = NULL;
+       struct ep11_target_dev target;
+       struct ep11_urb *urb = NULL;
+       struct ep11keyblob *kb;
+       int api, rc = -ENOMEM;
+
+       switch (keybitsize) {
+       case 128:
+       case 192:
+       case 256:
+               break;
+       default:
+               DEBUG_ERR(
+                       "%s unknown/unsupported keybitsize %d\n",
+                       __func__, keybitsize);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* request cprb and payload */
+       req = alloc_cprb(sizeof(struct keygen_req_pl));
+       if (!req)
+               goto out;
+       req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+       api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+       prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+       req_pl->var_tag = 0x04;
+       req_pl->var_len = sizeof(u32);
+       req_pl->keybytes_tag = 0x04;
+       req_pl->keybytes_len = sizeof(u32);
+       req_pl->keybytes = keybitsize / 8;
+       req_pl->mech_tag = 0x04;
+       req_pl->mech_len = sizeof(u32);
+       req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+       req_pl->attr_tag = 0x04;
+       req_pl->attr_len = 5 * sizeof(u32);
+       req_pl->attr_header = 0x10010000;
+       req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+       req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+       req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+       req_pl->attr_val_len_value = keybitsize / 8;
+       req_pl->pin_tag = 0x04;
+
+       /* reply cprb and payload */
+       rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+       if (!rep)
+               goto out;
+       rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+       /* urb and target */
+       urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+       if (!urb)
+               goto out;
+       target.ap_id = card;
+       target.dom_id = domain;
+       prep_urb(urb, &target, 1,
+                req, sizeof(*req) + sizeof(*req_pl),
+                rep, sizeof(*rep) + sizeof(*rep_pl));
+
+       rc = _zcrypt_send_ep11_cprb(urb);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+                       __func__, (int) card, (int) domain, rc);
+               goto out;
+       }
+
+       rc = check_reply_pl((u8 *)rep_pl, __func__);
+       if (rc)
+               goto out;
+       if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+               DEBUG_ERR("%s unknown reply data format\n", __func__);
+               rc = -EIO;
+               goto out;
+       }
+       if (rep_pl->data_len > *keybufsize) {
+               DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+                         __func__);
+               rc = -ENOSPC;
+               goto out;
+       }
+
+       /* copy key blob and set header values */
+       memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+       *keybufsize = rep_pl->data_len;
+       kb = (struct ep11keyblob *) keybuf;
+       kb->head.type = TOKTYPE_NON_CCA;
+       kb->head.len = rep_pl->data_len;
+       kb->head.version = TOKVER_EP11_AES;
+       kb->head.keybitlen = keybitsize;
+
+out:
+       kfree(req);
+       kfree(rep);
+       kfree(urb);
+       return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+                           u16 mode, u32 mech, const u8 *iv,
+                           const u8 *key, size_t keysize,
+                           const u8 *inbuf, size_t inbufsize,
+                           u8 *outbuf, size_t *outbufsize)
+{
+       struct crypt_req_pl {
+               struct pl_head head;
+               u8  var_tag;
+               u8  var_len;
+               u32 var;
+               u8  mech_tag;
+               u8  mech_len;
+               u32 mech;
+               /*
+                * maybe followed by iv data
+                * followed by key tag + key blob
+                * followed by plaintext tag + plaintext
+                */
+       } __packed * req_pl;
+       struct crypt_rep_pl {
+               struct pl_head head;
+               u8  rc_tag;
+               u8  rc_len;
+               u32 rc;
+               u8  data_tag;
+               u8  data_lenfmt;
+               /* data follows */
+       } __packed * rep_pl;
+       struct ep11_cprb *req = NULL, *rep = NULL;
+       struct ep11_target_dev target;
+       struct ep11_urb *urb = NULL;
+       size_t req_pl_size, rep_pl_size;
+       int n, api = 1, rc = -ENOMEM;
+       u8 *p;
+
+       /* the simple asn1 coding used has length limits */
+       if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+               return -EINVAL;
+
+       /* request cprb and payload */
+       req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+               + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+       req = alloc_cprb(req_pl_size);
+       if (!req)
+               goto out;
+       req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+       prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+       req_pl->var_tag = 0x04;
+       req_pl->var_len = sizeof(u32);
+       /* mech is mech + mech params (iv here) */
+       req_pl->mech_tag = 0x04;
+       req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+       req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+       p = ((u8 *) req_pl) + sizeof(*req_pl);
+       if (iv) {
+               memcpy(p, iv, 16);
+               p += 16;
+       }
+       /* key and input data */
+       p += asn1tag_write(p, 0x04, key, keysize);
+       p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+       /* reply cprb and payload, assume out data size <= in data size + 32 */
+       rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+       rep = alloc_cprb(rep_pl_size);
+       if (!rep)
+               goto out;
+       rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+       /* urb and target */
+       urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+       if (!urb)
+               goto out;
+       target.ap_id = card;
+       target.dom_id = domain;
+       prep_urb(urb, &target, 1,
+                req, sizeof(*req) + req_pl_size,
+                rep, sizeof(*rep) + rep_pl_size);
+
+       rc = _zcrypt_send_ep11_cprb(urb);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+                       __func__, (int) card, (int) domain, rc);
+               goto out;
+       }
+
+       rc = check_reply_pl((u8 *)rep_pl, __func__);
+       if (rc)
+               goto out;
+       if (rep_pl->data_tag != 0x04) {
+               DEBUG_ERR("%s unknown reply data format\n", __func__);
+               rc = -EIO;
+               goto out;
+       }
+       p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+       if (rep_pl->data_lenfmt <= 127)
+               n = rep_pl->data_lenfmt;
+       else if (rep_pl->data_lenfmt == 0x81)
+               n = *p++;
+       else if (rep_pl->data_lenfmt == 0x82) {
+               n = *((u16 *) p);
+               p += 2;
+       } else {
+               DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+                         __func__, rep_pl->data_lenfmt);
+               rc = -EIO;
+               goto out;
+       }
+       if (n > *outbufsize) {
+               DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+                         __func__, n, *outbufsize);
+               rc = -ENOSPC;
+               goto out;
+       }
+
+       memcpy(outbuf, p, n);
+       *outbufsize = n;
+
+out:
+       kfree(req);
+       kfree(rep);
+       kfree(urb);
+       return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+                         const u8 *kek, size_t keksize,
+                         const u8 *enckey, size_t enckeysize,
+                         u32 mech, const u8 *iv,
+                         u32 keybitsize, u32 keygenflags,
+                         u8 *keybuf, size_t *keybufsize)
+{
+       struct uw_req_pl {
+               struct pl_head head;
+               u8  attr_tag;
+               u8  attr_len;
+               u32 attr_header;
+               u32 attr_bool_mask;
+               u32 attr_bool_bits;
+               u32 attr_key_type;
+               u32 attr_key_type_value;
+               u32 attr_val_len;
+               u32 attr_val_len_value;
+               u8  mech_tag;
+               u8  mech_len;
+               u32 mech;
+               /*
+                * maybe followed by iv data
+                * followed by kek tag + kek blob
+                * followed by empty mac tag
+                * followed by empty pin tag
+                * followed by encrypted key tag + bytes
+                */
+       } __packed * req_pl;
+       struct uw_rep_pl {
+               struct pl_head head;
+               u8  rc_tag;
+               u8  rc_len;
+               u32 rc;
+               u8  data_tag;
+               u8  data_lenfmt;
+               u16 data_len;
+               u8  data[512];
+       } __packed * rep_pl;
+       struct ep11_cprb *req = NULL, *rep = NULL;
+       struct ep11_target_dev target;
+       struct ep11_urb *urb = NULL;
+       struct ep11keyblob *kb;
+       size_t req_pl_size;
+       int api, rc = -ENOMEM;
+       u8 *p;
+
+       /* request cprb and payload */
+       req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+               + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+       req = alloc_cprb(req_pl_size);
+       if (!req)
+               goto out;
+       req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+       api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+       prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+       req_pl->attr_tag = 0x04;
+       req_pl->attr_len = 7 * sizeof(u32);
+       req_pl->attr_header = 0x10020000;
+       req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+       req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+       req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+       req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+       req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+       req_pl->attr_val_len_value = keybitsize / 8;
+       /* mech is mech + mech params (iv here) */
+       req_pl->mech_tag = 0x04;
+       req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+       req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+       p = ((u8 *) req_pl) + sizeof(*req_pl);
+       if (iv) {
+               memcpy(p, iv, 16);
+               p += 16;
+       }
+       /* kek */
+       p += asn1tag_write(p, 0x04, kek, keksize);
+       /* empty mac key tag */
+       *p++ = 0x04;
+       *p++ = 0;
+       /* empty pin tag */
+       *p++ = 0x04;
+       *p++ = 0;
+       /* encrypted key value tag and bytes */
+       p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+       /* reply cprb and payload */
+       rep = alloc_cprb(sizeof(struct uw_rep_pl));
+       if (!rep)
+               goto out;
+       rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+       /* urb and target */
+       urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+       if (!urb)
+               goto out;
+       target.ap_id = card;
+       target.dom_id = domain;
+       prep_urb(urb, &target, 1,
+                req, sizeof(*req) + req_pl_size,
+                rep, sizeof(*rep) + sizeof(*rep_pl));
+
+       rc = _zcrypt_send_ep11_cprb(urb);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+                       __func__, (int) card, (int) domain, rc);
+               goto out;
+       }
+
+       rc = check_reply_pl((u8 *)rep_pl, __func__);
+       if (rc)
+               goto out;
+       if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+               DEBUG_ERR("%s unknown reply data format\n", __func__);
+               rc = -EIO;
+               goto out;
+       }
+       if (rep_pl->data_len > *keybufsize) {
+               DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+                         __func__);
+               rc = -ENOSPC;
+               goto out;
+       }
+
+       /* copy key blob and set header values */
+       memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+       *keybufsize = rep_pl->data_len;
+       kb = (struct ep11keyblob *) keybuf;
+       kb->head.type = TOKTYPE_NON_CCA;
+       kb->head.len = rep_pl->data_len;
+       kb->head.version = TOKVER_EP11_AES;
+       kb->head.keybitlen = keybitsize;
+
+out:
+       kfree(req);
+       kfree(rep);
+       kfree(urb);
+       return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+                       const u8 *key, size_t keysize,
+                       u32 mech, const u8 *iv,
+                       u8 *databuf, size_t *datasize)
+{
+       struct wk_req_pl {
+               struct pl_head head;
+               u8  var_tag;
+               u8  var_len;
+               u32 var;
+               u8  mech_tag;
+               u8  mech_len;
+               u32 mech;
+               /*
+                * followed by iv data
+                * followed by key tag + key blob
+                * followed by dummy kek param
+                * followed by dummy mac param
+                */
+       } __packed * req_pl;
+       struct wk_rep_pl {
+               struct pl_head head;
+               u8  rc_tag;
+               u8  rc_len;
+               u32 rc;
+               u8  data_tag;
+               u8  data_lenfmt;
+               u16 data_len;
+               u8  data[512];
+       } __packed * rep_pl;
+       struct ep11_cprb *req = NULL, *rep = NULL;
+       struct ep11_target_dev target;
+       struct ep11_urb *urb = NULL;
+       struct ep11keyblob *kb;
+       size_t req_pl_size;
+       int api, rc = -ENOMEM;
+       u8 *p;
+
+       /* request cprb and payload */
+       req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+               + ASN1TAGLEN(keysize) + 4;
+       req = alloc_cprb(req_pl_size);
+       if (!req)
+               goto out;
+       if (!mech || mech == 0x80060001)
+               req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+       req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+       api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+       prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+       req_pl->var_tag = 0x04;
+       req_pl->var_len = sizeof(u32);
+       /* mech is mech + mech params (iv here) */
+       req_pl->mech_tag = 0x04;
+       req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+       req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+       p = ((u8 *) req_pl) + sizeof(*req_pl);
+       if (iv) {
+               memcpy(p, iv, 16);
+               p += 16;
+       }
+       /* key blob */
+       p += asn1tag_write(p, 0x04, key, keysize);
+       /* if the key blob carries an ep11 header, clear it before wrapping */
+       kb = (struct ep11keyblob *)(p - keysize);
+       if (kb->head.version == TOKVER_EP11_AES)
+               memset(&kb->head, 0, sizeof(kb->head));
+       /* empty kek tag */
+       *p++ = 0x04;
+       *p++ = 0;
+       /* empty mac tag */
+       *p++ = 0x04;
+       *p++ = 0;
+
+       /* reply cprb and payload */
+       rep = alloc_cprb(sizeof(struct wk_rep_pl));
+       if (!rep)
+               goto out;
+       rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+       /* urb and target */
+       urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+       if (!urb)
+               goto out;
+       target.ap_id = card;
+       target.dom_id = domain;
+       prep_urb(urb, &target, 1,
+                req, sizeof(*req) + req_pl_size,
+                rep, sizeof(*rep) + sizeof(*rep_pl));
+
+       rc = _zcrypt_send_ep11_cprb(urb);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+                       __func__, (int) card, (int) domain, rc);
+               goto out;
+       }
+
+       rc = check_reply_pl((u8 *)rep_pl, __func__);
+       if (rc)
+               goto out;
+       if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+               DEBUG_ERR("%s unknown reply data format\n", __func__);
+               rc = -EIO;
+               goto out;
+       }
+       if (rep_pl->data_len > *datasize) {
+               DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+                         __func__);
+               rc = -ENOSPC;
+               goto out;
+       }
+
+       /* copy the data from the cprb to the data buffer */
+       memcpy(databuf, rep_pl->data, rep_pl->data_len);
+       *datasize = rep_pl->data_len;
+
+out:
+       kfree(req);
+       kfree(rep);
+       kfree(urb);
+       return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+                    const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+       int rc;
+       struct ep11keyblob *kb;
+       u8 encbuf[64], *kek = NULL;
+       size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+       if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+               clrkeylen = keybitsize / 8;
+       else {
+               DEBUG_ERR(
+                       "%s unknown/unsupported keybitsize %d\n",
+                       __func__, keybitsize);
+               return -EINVAL;
+       }
+
+       /* allocate memory for the temp kek */
+       keklen = MAXEP11AESKEYBLOBSIZE;
+       kek = kmalloc(keklen, GFP_ATOMIC);
+       if (!kek) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       /* Step 1: generate AES 256 bit random kek key */
+       rc = ep11_genaeskey(card, domain, 256,
+                           0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+                           kek, &keklen);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s generate kek key failed, rc=%d\n",
+                       __func__, rc);
+               goto out;
+       }
+       kb = (struct ep11keyblob *) kek;
+       memset(&kb->head, 0, sizeof(kb->head));
+
+       /* Step 2: encrypt clear key value with the kek key */
+       rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+                             clrkey, clrkeylen, encbuf, &encbuflen);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s encrypting key value with kek key failed, rc=%d\n",
+                       __func__, rc);
+               goto out;
+       }
+
+       /* Step 3: import the encrypted key value as a new key */
+       rc = ep11_unwrapkey(card, domain, kek, keklen,
+                           encbuf, encbuflen, 0, def_iv,
+                           keybitsize, 0, keybuf, keybufsize);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s importing key value as new key failed, rc=%d\n",
+                       __func__, rc);
+               goto out;
+       }
+
+out:
+       kfree(kek);
+       return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
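A caller-level sketch of the three step procedure above (illustrative only; card, domain and the all-zero clear key are made-up examples):

static int __maybe_unused example_clr2keyblob(void)
{
	static const u8 clrkey[32];		/* example 256 bit clear key */
	u8 keybuf[MAXEP11AESKEYBLOBSIZE];
	size_t keybufsize = sizeof(keybuf);

	/* card 2, domain 51, no special keygen flags */
	return ep11_clr2keyblob(2, 51, 256, 0, clrkey, keybuf, &keybufsize);
}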
+
+int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+                    u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+       int rc = -EIO;
+       u8 *wkbuf = NULL;
+       size_t wkbuflen = 256;
+       struct wk_info {
+               u16 version;
+               u8  res1[16];
+               u32 pkeytype;
+               u32 pkeybitsize;
+               u64 pkeysize;
+               u8  res2[8];
+               u8  pkey[0];
+       } __packed * wki;
+
+       /* alloc temp working buffer */
+       wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+       if (!wkbuf)
+               return -ENOMEM;
+
+       /* ep11 secure key -> protected key + info */
+       rc = ep11_wrapkey(card, dom, key, keylen,
+                         0, def_iv, wkbuf, &wkbuflen);
+       if (rc) {
+               DEBUG_ERR(
+                       "%s rewrapping ep11 key to pkey failed, rc=%d\n",
+                       __func__, rc);
+               goto out;
+       }
+       wki = (struct wk_info *) wkbuf;
+
+       /* check struct version and pkey type */
+       if (wki->version != 1 || wki->pkeytype != 1) {
+               DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+                         __func__, (int) wki->version, (int) wki->pkeytype);
+               rc = -EIO;
+               goto out;
+       }
+
+       /* copy the translated protected key */
+       switch (wki->pkeysize) {
+       case 16+32:
+               /* AES 128 protected key */
+               if (protkeytype)
+                       *protkeytype = PKEY_KEYTYPE_AES_128;
+               break;
+       case 24+32:
+               /* AES 192 protected key */
+               if (protkeytype)
+                       *protkeytype = PKEY_KEYTYPE_AES_192;
+               break;
+       case 32+32:
+               /* AES 256 protected key */
+               if (protkeytype)
+                       *protkeytype = PKEY_KEYTYPE_AES_256;
+               break;
+       default:
+               DEBUG_ERR("%s unknown/unsupported pkeysize %d\n",
+                         __func__, (int) wki->pkeysize);
+               rc = -EIO;
+               goto out;
+       }
+       memcpy(protkey, wki->pkey, wki->pkeysize);
+       if (protkeylen)
+               *protkeylen = (u32) wki->pkeysize;
+       rc = 0;
+
+out:
+       kfree(wkbuf);
+       return rc;
+}
+EXPORT_SYMBOL(ep11_key2protkey);
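A usage sketch (illustrative only), assuming keybuf/keylen hold an EP11 AES secure key blob previously created with ep11_genaeskey() or ep11_clr2keyblob():

static int __maybe_unused example_key2protkey(u16 card, u16 dom,
					      const u8 *keybuf, size_t keylen)
{
	u8 protkey[64];		/* enough for an AES 256 protected key */
	u32 protkeylen, protkeytype;

	return ep11_key2protkey(card, dom, keybuf, keylen,
				protkey, &protkeylen, &protkeytype);
}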
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+                  int minhwtype, int minapi, const u8 *wkvp)
+{
+       struct zcrypt_device_status_ext *device_status;
+       u32 *_apqns = NULL, _nr_apqns = 0;
+       int i, card, dom, rc = -ENOMEM;
+       struct ep11_domain_info edi;
+       struct ep11_card_info eci;
+
+       /* fetch status of all crypto cards */
+       device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+                                     sizeof(struct zcrypt_device_status_ext),
+                                     GFP_KERNEL);
+       if (!device_status)
+               return -ENOMEM;
+       zcrypt_device_status_mask_ext(device_status);
+
+       /* allocate 1k space for up to 256 apqns */
+       _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+       if (!_apqns) {
+               kfree(device_status);
+               return -ENOMEM;
+       }
+
+       /* walk through all the crypto apqns */
+       for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+               card = AP_QID_CARD(device_status[i].qid);
+               dom = AP_QID_QUEUE(device_status[i].qid);
+               /* check online state */
+               if (!device_status[i].online)
+                       continue;
+               /* check for ep11 functions */
+               if (!(device_status[i].functions & 0x01))
+                       continue;
+               /* check cardnr */
+               if (cardnr != 0xFFFF && card != cardnr)
+                       continue;
+               /* check domain */
+               if (domain != 0xFFFF && dom != domain)
+                       continue;
+               /* check min hardware type */
+               if (minhwtype && device_status[i].hwtype < minhwtype)
+                       continue;
+               /* check min api version if given */
+               if (minapi > 0) {
+                       if (ep11_get_card_info(card, &eci, 0))
+                               continue;
+                       if (minapi > eci.API_ord_nr)
+                               continue;
+               }
+               /* check wkvp if given */
+               if (wkvp) {
+                       if (ep11_get_domain_info(card, dom, &edi))
+                               continue;
+                       if (edi.cur_wk_state != '1')
+                               continue;
+                       if (memcmp(wkvp, edi.cur_wkvp, 16))
+                               continue;
+               }
+               /* apqn passed all filtering criteria, add it to the array */
+               if (_nr_apqns < 256)
+                       _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+       }
+
+       /* nothing found? */
+       if (!_nr_apqns) {
+               kfree(_apqns);
+               rc = -ENODEV;
+       } else {
+               /* no re-allocation, simply return the _apqns array */
+               *apqns = _apqns;
+               *nr_apqns = _nr_apqns;
+               rc = 0;
+       }
+
+       kfree(device_status);
+       return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
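A usage sketch (illustrative only): collect all online EP11 APQNs that support at least the known API level, then decode the packed card/domain entries:

static int __maybe_unused example_find_ep11_apqns(void)
{
	u32 *apqns, nr_apqns, i;
	int rc;

	/* any card, any domain, any hw type, minimum API level, no wkvp match */
	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0,
			    EP11_API_V, NULL);
	if (rc)
		return rc;
	for (i = 0; i < nr_apqns; i++)
		pr_debug("usable EP11 apqn %04x.%04x\n",
			 apqns[i] >> 16, apqns[i] & 0xFFFF);
	kfree(apqns);
	return 0;
}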
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+       card_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644 (file)
index 0000000..e3ed5ed
--- /dev/null
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define TOKVER_EP11_AES  0x03  /* EP11 AES key blob */
+
+#define EP11_API_V 4  /* highest known and supported EP11 API version */
+
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+       union {
+               u8 session[32];
+               struct {
+                       u8  type;      /* 0x00 (TOKTYPE_NON_CCA) */
+                       u8  res0;      /* unused */
+                       u16 len;       /* total length in bytes of this blob */
+                       u8  version;   /* 0x06 (TOKVER_EP11_AES) */
+                       u8  res1;      /* unused */
+                       u16 keybitlen; /* clear key bit len, 0 for unknown */
+               } head;
+       };
+       u8  wkvp[16];  /* wrapping key verification pattern */
+       u64 attr;      /* boolean key attributes */
+       u64 mode;      /* mode bits */
+       u16 version;   /* 0x1234, EP11_STRUCT_MAGIC */
+       u8  iv[14];
+       u8  encrypted_key_data[144];
+       u8  mac[32];
+} __packed;
+
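The fields above add up to a fixed 256 byte layout (32 + 16 + 8 + 8 + 2 + 14 + 144 + 32); a compile time guard like the following could document that (illustrative only, assumes linux/build_bug.h is available):

static_assert(sizeof(struct ep11keyblob) == 256);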
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ * If keybitsize is given, the bitsize of the key is also checked.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+                         const u8 *key, int keybitsize,
+                         int checkcpacfexport);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+       u32  API_ord_nr;    /* API ordinal number */
+       u16  FW_version;    /* Firmware major and minor version */
+       char serial[16];    /* serial number string (16 ASCII chars, not 0x00 terminated) */
+       u64  op_mode;       /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+       char cur_wk_state;  /* '0' invalid, '1' valid */
+       char new_wk_state;  /* '0' empty, '1' uncommitted, '2' committed */
+       u8   cur_wkvp[32];  /* current wrapping key verification pattern */
+       u8   new_wkvp[32];  /* new wrapping key verification pattern */
+       u64  op_mode;       /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+                  u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+                    const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from EP11 AES secure key blob.
+ */
+int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen,
+                    u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
+ *   the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
+ *   key for this domain. When a wkvp is given, the domain info is always
+ *   re-fetched for each candidate apqn, so this triggers a request/reply
+ *   exchange with every eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with the 16 bit cardnr in the upper and the
+ * 16 bit domain nr in the lower half and may be cast to struct pkey_apqn.
+ * The return value is either 0 for success or a negative errno value.
+ * If no apqn meeting the criteria is found, -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+                  int minhwtype, int minapi, const u8 *wkvp);
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
index 80c5a23..7b49e2e 100644 (file)
@@ -617,6 +617,13 @@ static const struct file_operations esas2r_proc_fops = {
        .unlocked_ioctl = esas2r_proc_ioctl,
 };
 
+static const struct proc_ops esas2r_proc_ops = {
+       .proc_ioctl             = esas2r_proc_ioctl,
+#ifdef CONFIG_COMPAT
+       .proc_compat_ioctl      = compat_ptr_ioctl,
+#endif
+};
+
 static struct Scsi_Host *esas2r_proc_host;
 static int esas2r_proc_major;
 
@@ -728,7 +735,7 @@ const char *esas2r_info(struct Scsi_Host *sh)
 
                        pde = proc_create(ATTONODE_NAME, 0,
                                          sh->hostt->proc_dir,
-                                         &esas2r_proc_fops);
+                                         &esas2r_proc_ops);
 
                        if (!pde) {
                                esas2r_log_dev(ESAS2R_LOG_WARN,
index df14597..eed3102 100644 (file)
@@ -736,13 +736,12 @@ out:
        return err;
 }
 
-static const struct file_operations scsi_devinfo_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = proc_scsi_devinfo_open,
-       .read           = seq_read,
-       .write          = proc_scsi_devinfo_write,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops scsi_devinfo_proc_ops = {
+       .proc_open      = proc_scsi_devinfo_open,
+       .proc_read      = seq_read,
+       .proc_write     = proc_scsi_devinfo_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 #endif /* CONFIG_SCSI_PROC_FS */
 
@@ -867,7 +866,7 @@ int __init scsi_init_devinfo(void)
        }
 
 #ifdef CONFIG_SCSI_PROC_FS
-       p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
+       p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_ops);
        if (!p) {
                error = -ENOMEM;
                goto out;
index 5b31322..d6982d3 100644 (file)
@@ -83,12 +83,12 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file)
                                4 * PAGE_SIZE);
 }
 
-static const struct file_operations proc_scsi_fops = {
-       .open = proc_scsi_host_open,
-       .release = single_release,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .write = proc_scsi_host_write
+static const struct proc_ops proc_scsi_ops = {
+       .proc_open      = proc_scsi_host_open,
+       .proc_release   = single_release,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = proc_scsi_host_write
 };
 
 /**
@@ -146,7 +146,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
 
        sprintf(name,"%d", shost->host_no);
        p = proc_create_data(name, S_IRUGO | S_IWUSR,
-               sht->proc_dir, &proc_scsi_fops, shost);
+               sht->proc_dir, &proc_scsi_ops, shost);
        if (!p)
                printk(KERN_ERR "%s: Failed to register host %d in"
                       "%s\n", __func__, shost->host_no,
@@ -436,13 +436,12 @@ static int proc_scsi_open(struct inode *inode, struct file *file)
        return seq_open(file, &scsi_seq_ops);
 }
 
-static const struct file_operations proc_scsi_operations = {
-       .owner          = THIS_MODULE,
-       .open           = proc_scsi_open,
-       .read           = seq_read,
-       .write          = proc_scsi_write,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops scsi_scsi_proc_ops = {
+       .proc_open      = proc_scsi_open,
+       .proc_read      = seq_read,
+       .proc_write     = proc_scsi_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 /**
@@ -456,7 +455,7 @@ int __init scsi_init_procfs(void)
        if (!proc_scsi)
                goto err1;
 
-       pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations);
+       pde = proc_create("scsi/scsi", 0, NULL, &scsi_scsi_proc_ops);
        if (!pde)
                goto err2;
 
index bafeaf7..4e6af59 100644 (file)
@@ -2322,25 +2322,23 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
                                  size_t count, loff_t *off);
-static const struct file_operations adio_fops = {
-       .owner = THIS_MODULE,
-       .open = sg_proc_single_open_adio,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .write = sg_proc_write_adio,
-       .release = single_release,
+static const struct proc_ops adio_proc_ops = {
+       .proc_open      = sg_proc_single_open_adio,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = sg_proc_write_adio,
+       .proc_release   = single_release,
 };
 
 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
 static ssize_t sg_proc_write_dressz(struct file *filp, 
                const char __user *buffer, size_t count, loff_t *off);
-static const struct file_operations dressz_fops = {
-       .owner = THIS_MODULE,
-       .open = sg_proc_single_open_dressz,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .write = sg_proc_write_dressz,
-       .release = single_release,
+static const struct proc_ops dressz_proc_ops = {
+       .proc_open      = sg_proc_single_open_dressz,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = sg_proc_write_dressz,
+       .proc_release   = single_release,
 };
 
 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
@@ -2381,9 +2379,9 @@ sg_proc_init(void)
        if (!p)
                return 1;
 
-       proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_fops);
+       proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_proc_ops);
        proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops);
-       proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_fops);
+       proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_proc_ops);
        proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr);
        proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops);
        proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops);
index c7266ef..1f59beb 100644 (file)
@@ -646,8 +646,7 @@ static int orion_spi_probe(struct platform_device *pdev)
 
        /* The following clock is only used by some SoCs */
        spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
-       if (IS_ERR(spi->axi_clk) &&
-           PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+       if (PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
                status = -EPROBE_DEFER;
                goto out_rel_clk;
        }
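
The dropped IS_ERR() test was redundant: PTR_ERR() on a valid pointer cannot equal -EPROBE_DEFER, since error values occupy their own reserved address range, so the single comparison covers the only error the driver treats as fatal. As a hedged alternative for this kind of optional clock, assuming devm_clk_get_optional() would be applicable here (it is not what this patch uses):

        /* optional clock: absence is fine; real errors, including
         * -EPROBE_DEFER, are propagated */
        spi->axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
        if (IS_ERR(spi->axi_clk))
                return PTR_ERR(spi->axi_clk);
        status = clk_prepare_enable(spi->axi_clk);      /* NULL clk is a no-op */
        if (status)
                return status;
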
index 4a6c3f6..aa9dab8 100644 (file)
@@ -264,12 +264,12 @@ static int open_debug_level(struct inode *inode, struct file *file)
        return single_open(file, show_debug_level, NULL);
 }
 
-static const struct file_operations fops = {
-       .open = open_debug_level,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .write = write_debug_level,
-       .release = single_release,
+static const struct proc_ops debug_level_proc_ops = {
+       .proc_open      = open_debug_level,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = write_debug_level,
+       .proc_release   = single_release,
 };
 
 int __init ieee80211_debug_init(void)
@@ -284,7 +284,7 @@ int __init ieee80211_debug_init(void)
                                " proc directory\n");
                return -EIO;
        }
-       e = proc_create("debug_level", 0644, ieee80211_proc, &fops);
+       e = proc_create("debug_level", 0644, ieee80211_proc, &debug_level_proc_ops);
        if (!e) {
                remove_proc_entry(DRV_NAME, init_net.proc_net);
                ieee80211_proc = NULL;
index 1d4f317..f724962 100644 (file)
@@ -1101,15 +1101,15 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations proc_sysrq_trigger_operations = {
-       .write          = write_sysrq_trigger,
-       .llseek         = noop_llseek,
+static const struct proc_ops sysrq_trigger_proc_ops = {
+       .proc_write     = write_sysrq_trigger,
+       .proc_lseek     = noop_llseek,
 };
 
 static void sysrq_init_procfs(void)
 {
        if (!proc_create("sysrq-trigger", S_IWUSR, NULL,
-                        &proc_sysrq_trigger_operations))
+                        &sysrq_trigger_proc_ops))
                pr_err("Failed to register proc interface\n");
 }
 
index 04c142c..64de9f1 100644 (file)
@@ -72,7 +72,7 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params,
 
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
 
-static const struct file_operations rndis_proc_fops;
+static const struct proc_ops rndis_proc_ops;
 
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
@@ -902,7 +902,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
 
                sprintf(name, NAME_TEMPLATE, i);
                proc_entry = proc_create_data(name, 0660, NULL,
-                                             &rndis_proc_fops, params);
+                                             &rndis_proc_ops, params);
                if (!proc_entry) {
                        kfree(params);
                        rndis_put_nr(i);
@@ -1164,13 +1164,12 @@ static int rndis_proc_open(struct inode *inode, struct file *file)
        return single_open(file, rndis_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations rndis_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = rndis_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = rndis_proc_write,
+static const struct proc_ops rndis_proc_ops = {
+       .proc_open      = rndis_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = rndis_proc_write,
 };
 
 #define        NAME_TEMPLATE "driver/rndis-%03d"
index 7570c76..8ad14e5 100644 (file)
@@ -74,7 +74,7 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev,
        return count;
 }
 
-MDEV_TYPE_ATTR_WO(create);
+static MDEV_TYPE_ATTR_WO(create);
 
 static void mdev_type_release(struct kobject *kobj)
 {
index f2983f0..df4d960 100644 (file)
@@ -97,8 +97,10 @@ static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
 
        /* If there were any mappings at all... */
        if (data->mm) {
-               ret = mm_iommu_put(data->mm, data->mem);
-               WARN_ON(ret);
+               if (data->mem) {
+                       ret = mm_iommu_put(data->mm, data->mem);
+                       WARN_ON(ret);
+               }
 
                mmdrop(data->mm);
        }
@@ -159,7 +161,7 @@ static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
        data->useraddr = vma->vm_start;
        data->mm = current->mm;
 
-       atomic_inc(&data->mm->mm_count);
+       mmgrab(data->mm);
        ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
                        vma_pages(vma), data->gpu_hpa, &data->mem);
 
index 40d4fb9..abdca90 100644 (file)
@@ -24,7 +24,7 @@
 #define MDIO_AN_INT            0x8002
 #define MDIO_AN_INTMASK                0x8001
 
-static unsigned int xmdio_read(void *ioaddr, unsigned int mmd,
+static unsigned int xmdio_read(void __iomem *ioaddr, unsigned int mmd,
                               unsigned int reg)
 {
        unsigned int mmd_address, value;
@@ -35,7 +35,7 @@ static unsigned int xmdio_read(void *ioaddr, unsigned int mmd,
        return value;
 }
 
-static void xmdio_write(void *ioaddr, unsigned int mmd,
+static void xmdio_write(void __iomem *ioaddr, unsigned int mmd,
                        unsigned int reg, unsigned int value)
 {
        unsigned int mmd_address;
index 26cef65..16b3adc 100644 (file)
@@ -79,7 +79,7 @@ static long tce_iommu_mm_set(struct tce_container *container)
        }
        BUG_ON(!current->mm);
        container->mm = current->mm;
-       atomic_inc(&container->mm->mm_count);
+       mmgrab(container->mm);
 
        return 0;
 }
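
This hunk and the vfio_pci_nvgpu one above replace the open-coded atomic_inc(&mm->mm_count) with mmgrab(), the helper that pins the mm_struct itself (as opposed to mmget(), which pins the whole address space) and pairs with mmdrop(). A minimal sketch of the pairing; the tracker type and functions are invented:

#include <linux/sched.h>
#include <linux/sched/mm.h>

struct example_tracker {
        struct mm_struct *mm;
};

static void example_attach(struct example_tracker *t)
{
        t->mm = current->mm;
        mmgrab(t->mm);          /* keep the mm_struct alive; pairs with mmdrop() */
}

static void example_detach(struct example_tracker *t)
{
        mmdrop(t->mm);          /* drops the struct, does not tear down mappings */
        t->mm = NULL;
}
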
index 08a17eb..370bf25 100644 (file)
@@ -1017,7 +1017,7 @@ static int imxfb_probe(struct platform_device *pdev)
        }
 
        fbi->lcd_pwr = devm_regulator_get(&pdev->dev, "lcd");
-       if (IS_ERR(fbi->lcd_pwr) && (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER)) {
+       if (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
                goto failed_lcd;
        }
index f815f98..852673c 100644 (file)
@@ -1173,13 +1173,12 @@ static ssize_t viafb_dvp0_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations viafb_dvp0_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = viafb_dvp0_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = viafb_dvp0_proc_write,
+static const struct proc_ops viafb_dvp0_proc_ops = {
+       .proc_open      = viafb_dvp0_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = viafb_dvp0_proc_write,
 };
 
 static int viafb_dvp1_proc_show(struct seq_file *m, void *v)
@@ -1238,13 +1237,12 @@ static ssize_t viafb_dvp1_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations viafb_dvp1_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = viafb_dvp1_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = viafb_dvp1_proc_write,
+static const struct proc_ops viafb_dvp1_proc_ops = {
+       .proc_open      = viafb_dvp1_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = viafb_dvp1_proc_write,
 };
 
 static int viafb_dfph_proc_show(struct seq_file *m, void *v)
@@ -1273,13 +1271,12 @@ static ssize_t viafb_dfph_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations viafb_dfph_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = viafb_dfph_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = viafb_dfph_proc_write,
+static const struct proc_ops viafb_dfph_proc_ops = {
+       .proc_open      = viafb_dfph_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = viafb_dfph_proc_write,
 };
 
 static int viafb_dfpl_proc_show(struct seq_file *m, void *v)
@@ -1308,13 +1305,12 @@ static ssize_t viafb_dfpl_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations viafb_dfpl_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = viafb_dfpl_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = viafb_dfpl_proc_write,
+static const struct proc_ops viafb_dfpl_proc_ops = {
+       .proc_open      = viafb_dfpl_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = viafb_dfpl_proc_write,
 };
 
 static int viafb_vt1636_proc_show(struct seq_file *m, void *v)
@@ -1444,13 +1440,12 @@ static ssize_t viafb_vt1636_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations viafb_vt1636_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = viafb_vt1636_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = viafb_vt1636_proc_write,
+static const struct proc_ops viafb_vt1636_proc_ops = {
+       .proc_open      = viafb_vt1636_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = viafb_vt1636_proc_write,
 };
 
 #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
@@ -1522,13 +1517,12 @@ static ssize_t viafb_iga1_odev_proc_write(struct file *file,
        return res;
 }
 
-static const struct file_operations viafb_iga1_odev_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = viafb_iga1_odev_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = viafb_iga1_odev_proc_write,
+static const struct proc_ops viafb_iga1_odev_proc_ops = {
+       .proc_open      = viafb_iga1_odev_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = viafb_iga1_odev_proc_write,
 };
 
 static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v)
@@ -1562,13 +1556,12 @@ static ssize_t viafb_iga2_odev_proc_write(struct file *file,
        return res;
 }
 
-static const struct file_operations viafb_iga2_odev_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = viafb_iga2_odev_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = viafb_iga2_odev_proc_write,
+static const struct proc_ops viafb_iga2_odev_proc_ops = {
+       .proc_open      = viafb_iga2_odev_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = viafb_iga2_odev_proc_write,
 };
 
 #define IS_VT1636(lvds_chip)   ((lvds_chip).lvds_chip_name == VT1636_LVDS)
@@ -1580,14 +1573,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
        shared->proc_entry = viafb_entry;
        if (viafb_entry) {
 #ifdef CONFIG_FB_VIA_DIRECT_PROCFS
-               proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops);
-               proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_fops);
-               proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops);
-               proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops);
+               proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_ops);
+               proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_ops);
+               proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_ops);
+               proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_ops);
                if (IS_VT1636(shared->chip_info.lvds_chip_info)
                        || IS_VT1636(shared->chip_info.lvds_chip_info2))
                        proc_create("vt1636", 0, viafb_entry,
-                               &viafb_vt1636_proc_fops);
+                                   &viafb_vt1636_proc_ops);
 #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
 
                proc_create_single("supported_output_devices", 0, viafb_entry,
@@ -1595,11 +1588,11 @@ static void viafb_init_proc(struct viafb_shared *shared)
                iga1_entry = proc_mkdir("iga1", viafb_entry);
                shared->iga1_proc_entry = iga1_entry;
                proc_create("output_devices", 0, iga1_entry,
-                       &viafb_iga1_odev_proc_fops);
+                           &viafb_iga1_odev_proc_ops);
                iga2_entry = proc_mkdir("iga2", viafb_entry);
                shared->iga2_proc_entry = iga2_entry;
                proc_create("output_devices", 0, iga2_entry,
-                       &viafb_iga2_odev_proc_fops);
+                           &viafb_iga2_odev_proc_ops);
        }
 }
 static void viafb_remove_proc(struct viafb_shared *shared)
index 4fc83e3..0258415 100644 (file)
@@ -1006,19 +1006,19 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
        }
        mutex_unlock(&priv->lock);
 
-       /*
-        * gntdev takes the address of the PTE in find_grant_ptes() and passes
-        * it to the hypervisor in gntdev_map_grant_pages(). The purpose of
-        * the notifier is to prevent the hypervisor pointer to the PTE from
-        * going stale.
-        *
-        * Since this vma's mappings can't be touched without the mmap_sem,
-        * and we are holding it now, there is no need for the notifier_range
-        * locking pattern.
-        */
-       mmu_interval_read_begin(&map->notifier);
-
        if (use_ptemod) {
+               /*
+                * gntdev takes the address of the PTE in find_grant_ptes() and
+                * passes it to the hypervisor in gntdev_map_grant_pages(). The
+                * purpose of the notifier is to prevent the hypervisor pointer
+                * to the PTE from going stale.
+                *
+                * Since this vma's mappings can't be touched without the
+                * mmap_sem, and we are holding it now, there is no need for
+                * the notifier_range locking pattern.
+                */
+               mmu_interval_read_begin(&map->notifier);
+
                map->pages_vm_start = vma->vm_start;
                err = apply_to_page_range(vma->vm_mm, vma->vm_start,
                                          vma->vm_end - vma->vm_start,
index 6d12fc3..a8d2443 100644 (file)
@@ -94,7 +94,7 @@ static void watch_target(struct xenbus_watch *watch,
                                  "%llu", &static_max) == 1))
                        static_max >>= PAGE_SHIFT - 10;
                else
-                       static_max = new_target;
+                       static_max = balloon_stats.current_pages;
 
                target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
                                : static_max - balloon_stats.target_pages;
index 6011171..b20e43e 100644 (file)
@@ -286,6 +286,43 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
        return xen_pcibios_err_to_errno(err);
 }
 
+int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+{
+       int err;
+       u16 val;
+       int ret = 0;
+
+       err = pci_read_config_word(dev, PCI_COMMAND, &val);
+       if (err)
+               return err;
+       if (!(val & PCI_COMMAND_INTX_DISABLE))
+               ret |= INTERRUPT_TYPE_INTX;
+
+       /*
+        * Do not trust dev->msi(x)_enabled here, as enabling could be done
+        * by qemu directly, bypassing the pci_*msi* functions.
+        */
+       if (dev->msi_cap) {
+               err = pci_read_config_word(dev,
+                               dev->msi_cap + PCI_MSI_FLAGS,
+                               &val);
+               if (err)
+                       return err;
+               if (val & PCI_MSI_FLAGS_ENABLE)
+                       ret |= INTERRUPT_TYPE_MSI;
+       }
+       if (dev->msix_cap) {
+               err = pci_read_config_word(dev,
+                               dev->msix_cap + PCI_MSIX_FLAGS,
+                               &val);
+               if (err)
+                       return err;
+               if (val & PCI_MSIX_FLAGS_ENABLE)
+                       ret |= INTERRUPT_TYPE_MSIX;
+       }
+       return ret;
+}
+
 void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
 {
        struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
index 22db630..28c4518 100644 (file)
@@ -65,6 +65,11 @@ struct config_field_entry {
        void *data;
 };
 
+#define INTERRUPT_TYPE_NONE (1<<0)
+#define INTERRUPT_TYPE_INTX (1<<1)
+#define INTERRUPT_TYPE_MSI  (1<<2)
+#define INTERRUPT_TYPE_MSIX (1<<3)
+
 extern bool xen_pcibk_permissive;
 
 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
@@ -126,4 +131,6 @@ int xen_pcibk_config_capability_init(void);
 int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
 int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
 
+int xen_pcibk_get_interrupt_type(struct pci_dev *dev);
+
 #endif                         /* __XEN_PCIBACK_CONF_SPACE_H__ */
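
xen_pcibk_get_interrupt_type() reads the config space directly, rather than trusting the cached dev->msi_enabled/msix_enabled flags, and returns an INTERRUPT_TYPE_* bitmask of everything currently enabled. The callers added later in this series only ever ask one question of that mask: may this interrupt type be switched on while either nothing, or only that same type, is active? A small plain-C sketch of that exclusiveness rule; the names and constants below are simplified stand-ins, not the backend's own:

#include <assert.h>

enum { IRQ_INTX = 1 << 0, IRQ_MSI = 1 << 1, IRQ_MSIX = 1 << 2 };

/* Enabling 'wanted' is allowed only if nothing is enabled yet, or if the
 * only thing enabled is 'wanted' itself. */
static int may_enable(unsigned int enabled_mask, unsigned int wanted)
{
        return enabled_mask == 0 || enabled_mask == wanted;
}

int main(void)
{
        assert(may_enable(0, IRQ_MSI));                 /* nothing active yet */
        assert(may_enable(IRQ_MSI, IRQ_MSI));           /* re-enabling itself */
        assert(!may_enable(IRQ_INTX, IRQ_MSIX));        /* would mix types */
        assert(!may_enable(IRQ_MSI | IRQ_INTX, IRQ_MSI));
        return 0;
}
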
index e569413..22f13ab 100644 (file)
@@ -189,6 +189,85 @@ static const struct config_field caplist_pm[] = {
        {}
 };
 
+static struct msi_msix_field_config {
+       u16          enable_bit; /* bit for enabling MSI/MSI-X */
+       unsigned int int_type;   /* interrupt type for exclusiveness check */
+} msi_field_config = {
+       .enable_bit     = PCI_MSI_FLAGS_ENABLE,
+       .int_type       = INTERRUPT_TYPE_MSI,
+}, msix_field_config = {
+       .enable_bit     = PCI_MSIX_FLAGS_ENABLE,
+       .int_type       = INTERRUPT_TYPE_MSIX,
+};
+
+static void *msi_field_init(struct pci_dev *dev, int offset)
+{
+       return &msi_field_config;
+}
+
+static void *msix_field_init(struct pci_dev *dev, int offset)
+{
+       return &msix_field_config;
+}
+
+static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+                               void *data)
+{
+       int err;
+       u16 old_value;
+       const struct msi_msix_field_config *field_config = data;
+       const struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+
+       if (xen_pcibk_permissive || dev_data->permissive)
+               goto write;
+
+       err = pci_read_config_word(dev, offset, &old_value);
+       if (err)
+               return err;
+
+       if (new_value == old_value)
+               return 0;
+
+       if (!dev_data->allow_interrupt_control ||
+           (new_value ^ old_value) & ~field_config->enable_bit)
+               return PCIBIOS_SET_FAILED;
+
+       if (new_value & field_config->enable_bit) {
+               /* don't allow enabling together with other interrupt types */
+               int int_type = xen_pcibk_get_interrupt_type(dev);
+
+               if (int_type == INTERRUPT_TYPE_NONE ||
+                   int_type == field_config->int_type)
+                       goto write;
+               return PCIBIOS_SET_FAILED;
+       }
+
+write:
+       return pci_write_config_word(dev, offset, new_value);
+}
+
+static const struct config_field caplist_msix[] = {
+       {
+               .offset    = PCI_MSIX_FLAGS,
+               .size      = 2,
+               .init      = msix_field_init,
+               .u.w.read  = xen_pcibk_read_config_word,
+               .u.w.write = msi_msix_flags_write,
+       },
+       {}
+};
+
+static const struct config_field caplist_msi[] = {
+       {
+               .offset    = PCI_MSI_FLAGS,
+               .size      = 2,
+               .init      = msi_field_init,
+               .u.w.read  = xen_pcibk_read_config_word,
+               .u.w.write = msi_msix_flags_write,
+       },
+       {}
+};
+
 static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
        .capability = PCI_CAP_ID_PM,
        .fields = caplist_pm,
@@ -197,11 +276,21 @@ static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
        .capability = PCI_CAP_ID_VPD,
        .fields = caplist_vpd,
 };
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_msi = {
+       .capability = PCI_CAP_ID_MSI,
+       .fields = caplist_msi,
+};
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_msix = {
+       .capability = PCI_CAP_ID_MSIX,
+       .fields = caplist_msix,
+};
 
 int xen_pcibk_config_capability_init(void)
 {
        register_capability(&xen_pcibk_config_capability_vpd);
        register_capability(&xen_pcibk_config_capability_pm);
+       register_capability(&xen_pcibk_config_capability_msi);
+       register_capability(&xen_pcibk_config_capability_msix);
 
        return 0;
 }
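
msi_msix_flags_write() lets a non-permissive guest with interrupt control change exactly one bit of the flags word: `(new_value ^ old_value) & ~field_config->enable_bit` is non-zero as soon as anything other than the enable bit would flip, and the write is then refused with PCIBIOS_SET_FAILED. The same XOR idiom guards PCI_COMMAND_INTX_DISABLE in command_write() below. A short, runnable plain-C check of the mask arithmetic; the values are invented:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint16_t enable_bit = 0x0001;     /* stand-in for PCI_MSI_FLAGS_ENABLE */
        uint16_t old_value = 0x0080;            /* some pre-existing flags */

        /* flipping only the enable bit is allowed... */
        uint16_t new_ok = old_value | enable_bit;
        assert(((new_ok ^ old_value) & ~enable_bit) == 0);

        /* ...but touching any other bit in the same write is rejected */
        uint16_t new_bad = (old_value | enable_bit) ^ 0x0010;
        assert(((new_bad ^ old_value) & ~enable_bit) != 0);
        return 0;
}
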
index 10ae24b..fb4fccb 100644 (file)
@@ -117,6 +117,25 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
                pci_clear_mwi(dev);
        }
 
+       if (dev_data && dev_data->allow_interrupt_control) {
+               if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+                       if (value & PCI_COMMAND_INTX_DISABLE) {
+                               pci_intx(dev, 0);
+                       } else {
+                               /* Do not allow enabling INTx together with MSI or MSI-X. */
+                               switch (xen_pcibk_get_interrupt_type(dev)) {
+                               case INTERRUPT_TYPE_NONE:
+                                       pci_intx(dev, 1);
+                                       break;
+                               case INTERRUPT_TYPE_INTX:
+                                       break;
+                               default:
+                                       return PCIBIOS_SET_FAILED;
+                               }
+                       }
+               }
+       }
+
        cmd->val = value;
 
        if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
index 097410a..7af93d6 100644 (file)
@@ -304,6 +304,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
        xen_pcibk_config_reset_dev(dev);
        xen_pcibk_config_free_dyn_fields(dev);
 
+       dev_data->allow_interrupt_control = 0;
+
        xen_unregister_device_domain_owner(dev);
 
        spin_lock_irqsave(&found_psdev->lock, flags);
@@ -1431,6 +1433,65 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
 }
 static DRIVER_ATTR_RW(permissive);
 
+static ssize_t allow_interrupt_control_store(struct device_driver *drv,
+                                            const char *buf, size_t count)
+{
+       int domain, bus, slot, func;
+       int err;
+       struct pcistub_device *psdev;
+       struct xen_pcibk_dev_data *dev_data;
+
+       err = str_to_slot(buf, &domain, &bus, &slot, &func);
+       if (err)
+               goto out;
+
+       psdev = pcistub_device_find(domain, bus, slot, func);
+       if (!psdev) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       dev_data = pci_get_drvdata(psdev->dev);
+       /* the driver data for a device should never be null at this point */
+       if (!dev_data) {
+               err = -ENXIO;
+               goto release;
+       }
+       dev_data->allow_interrupt_control = 1;
+release:
+       pcistub_device_put(psdev);
+out:
+       if (!err)
+               err = count;
+       return err;
+}
+
+static ssize_t allow_interrupt_control_show(struct device_driver *drv,
+                                           char *buf)
+{
+       struct pcistub_device *psdev;
+       struct xen_pcibk_dev_data *dev_data;
+       size_t count = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pcistub_devices_lock, flags);
+       list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+               if (count >= PAGE_SIZE)
+                       break;
+               if (!psdev->dev)
+                       continue;
+               dev_data = pci_get_drvdata(psdev->dev);
+               if (!dev_data || !dev_data->allow_interrupt_control)
+                       continue;
+               count +=
+                   scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
+                             pci_name(psdev->dev));
+       }
+       spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+       return count;
+}
+static DRIVER_ATTR_RW(allow_interrupt_control);
+
 static void pcistub_exit(void)
 {
        driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
@@ -1440,6 +1501,8 @@ static void pcistub_exit(void)
        driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
        driver_remove_file(&xen_pcibk_pci_driver.driver,
                           &driver_attr_permissive);
+       driver_remove_file(&xen_pcibk_pci_driver.driver,
+                          &driver_attr_allow_interrupt_control);
        driver_remove_file(&xen_pcibk_pci_driver.driver,
                           &driver_attr_irq_handlers);
        driver_remove_file(&xen_pcibk_pci_driver.driver,
@@ -1530,6 +1593,9 @@ static int __init pcistub_init(void)
        if (!err)
                err = driver_create_file(&xen_pcibk_pci_driver.driver,
                                         &driver_attr_permissive);
+       if (!err)
+               err = driver_create_file(&xen_pcibk_pci_driver.driver,
+                                        &driver_attr_allow_interrupt_control);
 
        if (!err)
                err = driver_create_file(&xen_pcibk_pci_driver.driver,
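
allow_interrupt_control follows the standard driver-level sysfs attribute pattern: DRIVER_ATTR_RW() generates driver_attr_allow_interrupt_control from the _show/_store pair, and the attribute is added and removed with driver_create_file()/driver_remove_file() in pcistub_init()/pcistub_exit(). A stripped-down sketch of that pattern with an invented integer flag, not the backend's per-device semantics:

#include <linux/device.h>
#include <linux/kernel.h>

static int example_flag;

static ssize_t example_show(struct device_driver *drv, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", example_flag);
}

static ssize_t example_store(struct device_driver *drv, const char *buf,
                             size_t count)
{
        if (kstrtoint(buf, 0, &example_flag))
                return -EINVAL;
        return count;
}
static DRIVER_ATTR_RW(example);         /* defines driver_attr_example */

/* registered with driver_create_file(&some_driver.driver, &driver_attr_example)
 * and removed with driver_remove_file(), as pcistub_init()/pcistub_exit() do */
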
index 263c059..ce1077e 100644 (file)
@@ -45,6 +45,7 @@ struct xen_pcibk_dev_data {
        struct list_head config_fields;
        struct pci_saved_state *pci_saved_state;
        unsigned int permissive:1;
+       unsigned int allow_interrupt_control:1;
        unsigned int warned_on_write:1;
        unsigned int enable_intx:1;
        unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
index 378486b..66975da 100644 (file)
@@ -239,7 +239,9 @@ int xenbus_dev_probe(struct device *_dev)
                goto fail;
        }
 
+       spin_lock(&dev->reclaim_lock);
        err = drv->probe(dev, id);
+       spin_unlock(&dev->reclaim_lock);
        if (err)
                goto fail_put;
 
@@ -268,8 +270,11 @@ int xenbus_dev_remove(struct device *_dev)
 
        free_otherend_watch(dev);
 
-       if (drv->remove)
+       if (drv->remove) {
+               spin_lock(&dev->reclaim_lock);
                drv->remove(dev);
+               spin_unlock(&dev->reclaim_lock);
+       }
 
        module_put(drv->driver.owner);
 
@@ -468,6 +473,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
                goto fail;
 
        dev_set_name(&xendev->dev, "%s", devname);
+       spin_lock_init(&xendev->reclaim_lock);
 
        /* Register with generic device framework. */
        err = device_register(&xendev->dev);
index 14876fa..791f6fe 100644 (file)
@@ -247,6 +247,41 @@ static int backend_probe_and_watch(struct notifier_block *notifier,
        return NOTIFY_DONE;
 }
 
+static int backend_reclaim_memory(struct device *dev, void *data)
+{
+       const struct xenbus_driver *drv;
+       struct xenbus_device *xdev;
+
+       if (!dev->driver)
+               return 0;
+       drv = to_xenbus_driver(dev->driver);
+       if (drv && drv->reclaim_memory) {
+               xdev = to_xenbus_device(dev);
+               if (!spin_trylock(&xdev->reclaim_lock))
+                       return 0;
+               drv->reclaim_memory(xdev);
+               spin_unlock(&xdev->reclaim_lock);
+       }
+       return 0;
+}
+
+/*
+ * Always returns 0, because the shrinker is only used to detect memory
+ * pressure.
+ */
+static unsigned long backend_shrink_memory_count(struct shrinker *shrinker,
+                               struct shrink_control *sc)
+{
+       bus_for_each_dev(&xenbus_backend.bus, NULL, NULL,
+                       backend_reclaim_memory);
+       return 0;
+}
+
+static struct shrinker backend_memory_shrinker = {
+       .count_objects = backend_shrink_memory_count,
+       .seeks = DEFAULT_SEEKS,
+};
+
 static int __init xenbus_probe_backend_init(void)
 {
        static struct notifier_block xenstore_notifier = {
@@ -263,6 +298,9 @@ static int __init xenbus_probe_backend_init(void)
 
        register_xenstore_notifier(&xenstore_notifier);
 
+       if (register_shrinker(&backend_memory_shrinker))
+               pr_warn("shrinker registration failed\n");
+
        return 0;
 }
 subsys_initcall(xenbus_probe_backend_init);
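
The shrinker registered here is used purely as a memory-pressure hook: count_objects does the actual work (walking the backend bus and calling each driver's reclaim_memory under a trylock on the new reclaim_lock, so it never races with probe/remove, which take the same lock) and then reports zero objects, so the core never needs a scan_objects callback. A minimal sketch of this "pressure notification only" pattern against the 5.6-era shrinker API; the example_* names are invented:

#include <linux/shrinker.h>

static void example_release_cached_resources(void)
{
        /* invented placeholder for driver-specific reclaim work */
}

static unsigned long example_pressure_count(struct shrinker *shrinker,
                                            struct shrink_control *sc)
{
        example_release_cached_resources();
        return 0;       /* nothing countable, so scan_objects is never needed */
}

static struct shrinker example_shrinker = {
        .count_objects  = example_pressure_count,
        .seeks          = DEFAULT_SEEKS,
};

/* if (register_shrinker(&example_shrinker)) pr_warn(...); in the init path */
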
index 2e4ca4d..1c9ae08 100644 (file)
@@ -56,10 +56,9 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
        return nbytes;
 }
 
-static const struct file_operations proc_bus_zorro_operations = {
-       .owner          = THIS_MODULE,
-       .llseek         = proc_bus_zorro_lseek,
-       .read           = proc_bus_zorro_read,
+static const struct proc_ops bus_zorro_proc_ops = {
+       .proc_lseek     = proc_bus_zorro_lseek,
+       .proc_read      = proc_bus_zorro_read,
 };
 
 static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
@@ -105,7 +104,7 @@ static int __init zorro_proc_attach_device(unsigned int slot)
 
        sprintf(name, "%02x", slot);
        entry = proc_create_data(name, 0, proc_bus_zorro_dir,
-                                &proc_bus_zorro_operations,
+                                &bus_zorro_proc_ops,
                                 &zorro_autocon[slot]);
        if (!entry)
                return -ENOMEM;
index a9fbad2..5f3d3d8 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1610,6 +1610,14 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
        return 0;
 }
 
+static void aio_poll_put_work(struct work_struct *work)
+{
+       struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+       struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+
+       iocb_put(iocb);
+}
+
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1674,6 +1682,8 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        list_del_init(&req->wait.entry);
 
        if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+               struct kioctx *ctx = iocb->ki_ctx;
+
                /*
                 * Try to complete the iocb inline if we can. Use
                 * irqsave/irqrestore because not all filesystems (e.g. fuse)
@@ -1683,8 +1693,14 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                list_del(&iocb->ki_list);
                iocb->ki_res.res = mangle_poll(mask);
                req->done = true;
-               spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-               iocb_put(iocb);
+               if (iocb->ki_eventfd && eventfd_signal_count()) {
+                       iocb = NULL;
+                       INIT_WORK(&req->work, aio_poll_put_work);
+                       schedule_work(&req->work);
+               }
+               spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+               if (iocb)
+                       iocb_put(iocb);
        } else {
                schedule_work(&req->work);
        }
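
The aio fix above exists because completing the poll iocb inline can drop the last reference, and the completion path then signals the iocb's eventfd; if this wakeup itself arrived from inside an eventfd_signal() (eventfd_signal_count() non-zero), that signal would recurse. Bouncing only the final put to a workqueue breaks the recursion while still completing the poll inline. A generic, hedged sketch of "defer the final put when the current context cannot run the release"; all example_* types are invented and this is not the aio code path:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_obj {
        struct kref ref;
        struct work_struct put_work;
};

static void example_release(struct kref *ref)
{
        kfree(container_of(ref, struct example_obj, ref));
}

static void example_put_work(struct work_struct *work)
{
        struct example_obj *obj = container_of(work, struct example_obj, put_work);

        kref_put(&obj->ref, example_release);
}

static void example_put(struct example_obj *obj, bool release_unsafe_here)
{
        if (release_unsafe_here) {
                /* run the potential release later, in process context */
                INIT_WORK(&obj->put_work, example_put_work);
                schedule_work(&obj->put_work);
        } else {
                kref_put(&obj->ref, example_release);
        }
}
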
index df28035..b4bbdbd 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -183,18 +183,12 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
                inode->i_uid = attr->ia_uid;
        if (ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
-       if (ia_valid & ATTR_ATIME) {
-               inode->i_atime = timestamp_truncate(attr->ia_atime,
-                                                 inode);
-       }
-       if (ia_valid & ATTR_MTIME) {
-               inode->i_mtime = timestamp_truncate(attr->ia_mtime,
-                                                 inode);
-       }
-       if (ia_valid & ATTR_CTIME) {
-               inode->i_ctime = timestamp_truncate(attr->ia_ctime,
-                                                 inode);
-       }
+       if (ia_valid & ATTR_ATIME)
+               inode->i_atime = attr->ia_atime;
+       if (ia_valid & ATTR_MTIME)
+               inode->i_mtime = attr->ia_mtime;
+       if (ia_valid & ATTR_CTIME)
+               inode->i_ctime = attr->ia_ctime;
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;
 
@@ -268,8 +262,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
        attr->ia_ctime = now;
        if (!(ia_valid & ATTR_ATIME_SET))
                attr->ia_atime = now;
+       else
+               attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
        if (!(ia_valid & ATTR_MTIME_SET))
                attr->ia_mtime = now;
+       else
+               attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
+
        if (ia_valid & ATTR_KILL_PRIV) {
                error = security_inode_need_killpriv(dentry);
                if (error < 0)
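
With setattr_copy() no longer clamping times, truncation to the filesystem's timestamp granularity happens once, at the notify_change() boundary, and only for times the caller supplied explicitly (ATTR_ATIME_SET/ATTR_MTIME_SET); the "now" values come from current_time(), which is already truncated to sb->s_time_gran. A small sketch of that rule; example_apply_times() is an invented helper, not a VFS function:

#include <linux/fs.h>

static void example_apply_times(struct inode *inode, struct iattr *attr)
{
        /* caller-supplied times get clamped to the filesystem granularity */
        if (attr->ia_valid & ATTR_ATIME_SET)
                attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
        if (attr->ia_valid & ATTR_MTIME_SET)
                attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
        /* otherwise notify_change() uses current_time(inode), which is
         * already truncated to inode->i_sb->s_time_gran */
}
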
index c1da294..0a0823d 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
 ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
        export.o caps.o snap.o xattr.o quota.o io.o \
        mds_client.o mdsmap.o strings.o ceph_frag.o \
-       debugfs.o
+       debugfs.o util.o
 
 ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
 ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
index aa55f41..26be652 100644 (file)
@@ -222,8 +222,8 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
                err = ceph_pagelist_reserve(pagelist, len + val_size2 + 8);
                if (err)
                        goto out_err;
-               err = ceph_pagelist_encode_string(pagelist,
-                                                 XATTR_NAME_POSIX_ACL_DEFAULT, len);
+               ceph_pagelist_encode_string(pagelist,
+                                         XATTR_NAME_POSIX_ACL_DEFAULT, len);
                err = posix_acl_to_xattr(&init_user_ns, default_acl,
                                         tmp_buf, val_size2);
                if (err < 0)
index 9d09bb5..28ae0c1 100644 (file)
@@ -908,7 +908,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
                                                       ci_node);
                                        if (!__cap_is_valid(cap))
                                                continue;
-                                       __touch_cap(cap);
+                                       if (cap->issued & mask)
+                                               __touch_cap(cap);
                                }
                        }
                        return 1;
index c281f32..fb7cabd 100644 (file)
@@ -33,7 +33,7 @@ static int mdsmap_show(struct seq_file *s, void *p)
        seq_printf(s, "max_mds %d\n", mdsmap->m_max_mds);
        seq_printf(s, "session_timeout %d\n", mdsmap->m_session_timeout);
        seq_printf(s, "session_autoclose %d\n", mdsmap->m_session_autoclose);
-       for (i = 0; i < mdsmap->m_num_mds; i++) {
+       for (i = 0; i < mdsmap->possible_max_rank; i++) {
                struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr;
                int state = mdsmap->m_info[i].state;
                seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
index 2e4764f..d0cd0ab 100644 (file)
@@ -1186,7 +1186,7 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
        struct dentry *dn = di->dentry;
        struct ceph_mds_client *mdsc;
 
-       dout("dentry_dir_lease_touch %p %p '%pd' (offset %lld)\n",
+       dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
             di, dn, dn, di->offset);
 
        if (!list_empty(&di->lease_list)) {
@@ -1567,7 +1567,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                inode = d_inode(dentry);
        }
 
-       dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
+       dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry,
             dentry, inode, ceph_dentry(dentry)->offset);
 
        /* always trust cached snapped dentries, snapdir dentry */
index 11929d2..c3b8e8e 100644 (file)
@@ -1974,6 +1974,9 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
        if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
                return -EOPNOTSUPP;
 
+       if (!src_fsc->have_copy_from2)
+               return -EOPNOTSUPP;
+
        /*
         * Striped file layouts require that we copy partial objects, but the
         * OSD copy-from operation only supports full-object copies.  Limit
@@ -2101,8 +2104,14 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
                        CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
                        &dst_oid, &dst_oloc,
                        CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
-                       CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
+                       CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
+                       dst_ci->i_truncate_seq, dst_ci->i_truncate_size,
+                       CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
                if (err) {
+                       if (err == -EOPNOTSUPP) {
+                               src_fsc->have_copy_from2 = false;
+                               pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
+                       }
                        dout("ceph_osdc_copy_from returned %d\n", err);
                        if (!ret)
                                ret = err;
index c074075..d01710a 100644 (file)
@@ -55,11 +55,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
        inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
-       if (inode->i_state & I_NEW) {
+       if (inode->i_state & I_NEW)
                dout("get_inode created new inode %p %llx.%llx ino %llx\n",
                     inode, ceph_vinop(inode), (u64)inode->i_ino);
-               unlock_new_inode(inode);
-       }
 
        dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
             vino.snap, inode);
@@ -88,6 +86,10 @@ struct inode *ceph_get_snapdir(struct inode *parent)
        inode->i_fop = &ceph_snapdir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
+
+       if (inode->i_state & I_NEW)
+               unlock_new_inode(inode);
+
        return inode;
 }
 
@@ -728,8 +730,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
 static int fill_inode(struct inode *inode, struct page *locked_page,
                      struct ceph_mds_reply_info_in *iinfo,
                      struct ceph_mds_reply_dirfrag *dirinfo,
-                     struct ceph_mds_session *session,
-                     unsigned long ttl_from, int cap_fmode,
+                     struct ceph_mds_session *session, int cap_fmode,
                      struct ceph_cap_reservation *caps_reservation)
 {
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
@@ -754,8 +755,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
        info_caps = le32_to_cpu(info->cap.caps);
 
        /* prealloc new cap struct */
-       if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
+       if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
                new_cap = ceph_get_cap(mdsc, caps_reservation);
+               if (!new_cap)
+                       return -ENOMEM;
+       }
 
        /*
         * prealloc xattr data, if it looks like we'll need it.  only
@@ -1237,7 +1241,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
                if (dir) {
                        err = fill_inode(dir, NULL,
                                         &rinfo->diri, rinfo->dirfrag,
-                                        session, req->r_request_started, -1,
+                                        session, -1,
                                         &req->r_caps_reservation);
                        if (err < 0)
                                goto done;
@@ -1302,18 +1306,22 @@ retry_lookup:
                        err = PTR_ERR(in);
                        goto done;
                }
-               req->r_target_inode = in;
 
                err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
-                               session, req->r_request_started,
+                               session,
                                (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
-                               rinfo->head->result == 0) ?  req->r_fmode : -1,
+                                rinfo->head->result == 0) ?  req->r_fmode : -1,
                                &req->r_caps_reservation);
                if (err < 0) {
                        pr_err("fill_inode badness %p %llx.%llx\n",
                                in, ceph_vinop(in));
+                       if (in->i_state & I_NEW)
+                               discard_new_inode(in);
                        goto done;
                }
+               req->r_target_inode = in;
+               if (in->i_state & I_NEW)
+                       unlock_new_inode(in);
        }
 
        /*
@@ -1493,12 +1501,18 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
                        continue;
                }
                rc = fill_inode(in, NULL, &rde->inode, NULL, session,
-                               req->r_request_started, -1,
-                               &req->r_caps_reservation);
+                               -1, &req->r_caps_reservation);
                if (rc < 0) {
                        pr_err("fill_inode badness on %p got %d\n", in, rc);
                        err = rc;
+                       if (in->i_state & I_NEW) {
+                               ihold(in);
+                               discard_new_inode(in);
+                       }
+               } else if (in->i_state & I_NEW) {
+                       unlock_new_inode(in);
                }
+
                /* avoid calling iput_final() in mds dispatch threads */
                ceph_async_iput(in);
        }
@@ -1694,19 +1708,24 @@ retry_lookup:
                }
 
                ret = fill_inode(in, NULL, &rde->inode, NULL, session,
-                                req->r_request_started, -1,
-                                &req->r_caps_reservation);
+                                -1, &req->r_caps_reservation);
                if (ret < 0) {
                        pr_err("fill_inode badness on %p\n", in);
                        if (d_really_is_negative(dn)) {
                                /* avoid calling iput_final() in mds
                                 * dispatch threads */
+                               if (in->i_state & I_NEW) {
+                                       ihold(in);
+                                       discard_new_inode(in);
+                               }
                                ceph_async_iput(in);
                        }
                        d_drop(dn);
                        err = ret;
                        goto next_item;
                }
+               if (in->i_state & I_NEW)
+                       unlock_new_inode(in);
 
                if (d_really_is_negative(dn)) {
                        if (ceph_security_xattr_deadlock(in)) {
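
The ceph inode changes above all follow the iget5_locked() contract: a newly hashed inode comes back locked with I_NEW set, and whoever fills it must finish with unlock_new_inode() on success or throw it away (ceph uses discard_new_inode()) when filling fails, so other lookups blocked on the inode hash can proceed. A generic sketch of that contract using the simpler iget_locked()/iget_failed() helpers; example_fill_inode() is invented:

#include <linux/err.h>
#include <linux/fs.h>

static int example_fill_inode(struct inode *inode)
{
        return 0;       /* invented placeholder for reading the real metadata */
}

static struct inode *example_get_inode(struct super_block *sb, unsigned long ino)
{
        struct inode *inode = iget_locked(sb, ino);

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;           /* cache hit, already initialised */

        if (example_fill_inode(inode)) {
                iget_failed(inode);     /* unlocks, marks bad, drops the ref */
                return ERR_PTR(-EIO);
        }
        unlock_new_inode(inode);
        return inode;
}
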
index 145d46b..bbbbddf 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
+#include <linux/bits.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -530,6 +531,7 @@ const char *ceph_session_state_name(int s)
        case CEPH_MDS_SESSION_OPEN: return "open";
        case CEPH_MDS_SESSION_HUNG: return "hung";
        case CEPH_MDS_SESSION_CLOSING: return "closing";
+       case CEPH_MDS_SESSION_CLOSED: return "closed";
        case CEPH_MDS_SESSION_RESTARTING: return "restarting";
        case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
        case CEPH_MDS_SESSION_REJECTED: return "rejected";
@@ -537,7 +539,7 @@ const char *ceph_session_state_name(int s)
        }
 }
 
-static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
+struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
 {
        if (refcount_inc_not_zero(&s->s_ref)) {
                dout("mdsc get_session %p %d -> %d\n", s,
@@ -568,7 +570,7 @@ struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
 {
        if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
                return NULL;
-       return get_session(mdsc->sessions[mds]);
+       return ceph_get_mds_session(mdsc->sessions[mds]);
 }
 
 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
@@ -597,7 +599,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 {
        struct ceph_mds_session *s;
 
-       if (mds >= mdsc->mdsmap->m_num_mds)
+       if (mds >= mdsc->mdsmap->possible_max_rank)
                return ERR_PTR(-EINVAL);
 
        s = kzalloc(sizeof(*s), GFP_NOFS);
@@ -674,7 +676,6 @@ static void __unregister_session(struct ceph_mds_client *mdsc,
        dout("__unregister_session mds%d %p\n", s->s_mds, s);
        BUG_ON(mdsc->sessions[s->s_mds] != s);
        mdsc->sessions[s->s_mds] = NULL;
-       s->s_state = 0;
        ceph_con_close(&s->s_con);
        ceph_put_mds_session(s);
        atomic_dec(&mdsc->num_sessions);
@@ -878,7 +879,8 @@ static struct inode *get_nonsnap_parent(struct dentry *dentry)
  * Called under mdsc->mutex.
  */
 static int __choose_mds(struct ceph_mds_client *mdsc,
-                       struct ceph_mds_request *req)
+                       struct ceph_mds_request *req,
+                       bool *random)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
@@ -888,6 +890,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        u32 hash = req->r_direct_hash;
        bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
 
+       if (random)
+               *random = false;
+
        /*
         * is there a specific mds we should try?  ignore hint if we have
         * no session and the mds is not up (active or recovering).
@@ -895,7 +900,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        if (req->r_resend_mds >= 0 &&
            (__have_session(mdsc, req->r_resend_mds) ||
             ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
-               dout("choose_mds using resend_mds mds%d\n",
+               dout("%s using resend_mds mds%d\n", __func__,
                     req->r_resend_mds);
                return req->r_resend_mds;
        }
@@ -913,7 +918,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                        rcu_read_lock();
                        inode = get_nonsnap_parent(req->r_dentry);
                        rcu_read_unlock();
-                       dout("__choose_mds using snapdir's parent %p\n", inode);
+                       dout("%s using snapdir's parent %p\n", __func__, inode);
                }
        } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
@@ -933,7 +938,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                        /* direct snapped/virtual snapdir requests
                         * based on parent dir inode */
                        inode = get_nonsnap_parent(parent);
-                       dout("__choose_mds using nonsnap parent %p\n", inode);
+                       dout("%s using nonsnap parent %p\n", __func__, inode);
                } else {
                        /* dentry target */
                        inode = d_inode(req->r_dentry);
@@ -949,8 +954,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                rcu_read_unlock();
        }
 
-       dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
-            (int)hash, mode);
+       dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
+            hash, mode);
        if (!inode)
                goto random;
        ci = ceph_inode(inode);
@@ -968,30 +973,33 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                                get_random_bytes(&r, 1);
                                r %= frag.ndist;
                                mds = frag.dist[r];
-                               dout("choose_mds %p %llx.%llx "
-                                    "frag %u mds%d (%d/%d)\n",
-                                    inode, ceph_vinop(inode),
-                                    frag.frag, mds,
-                                    (int)r, frag.ndist);
+                               dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
+                                    __func__, inode, ceph_vinop(inode),
+                                    frag.frag, mds, (int)r, frag.ndist);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
-                                   CEPH_MDS_STATE_ACTIVE)
+                                   CEPH_MDS_STATE_ACTIVE &&
+                                   !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
                                        goto out;
                        }
 
                        /* since this file/dir wasn't known to be
                         * replicated, then we want to look for the
                         * authoritative mds. */
-                       mode = USE_AUTH_MDS;
                        if (frag.mds >= 0) {
                                /* choose auth mds */
                                mds = frag.mds;
-                               dout("choose_mds %p %llx.%llx "
-                                    "frag %u mds%d (auth)\n",
-                                    inode, ceph_vinop(inode), frag.frag, mds);
+                               dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
+                                    __func__, inode, ceph_vinop(inode),
+                                    frag.frag, mds);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
-                                   CEPH_MDS_STATE_ACTIVE)
-                                       goto out;
+                                   CEPH_MDS_STATE_ACTIVE) {
+                                       if (mode == USE_ANY_MDS &&
+                                           !ceph_mdsmap_is_laggy(mdsc->mdsmap,
+                                                                 mds))
+                                               goto out;
+                               }
                        }
+                       mode = USE_AUTH_MDS;
                }
        }
 
@@ -1007,7 +1015,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                goto random;
        }
        mds = cap->session->s_mds;
-       dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
+       dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
        spin_unlock(&ci->i_ceph_lock);
@@ -1018,8 +1026,11 @@ out:
        return mds;
 
 random:
+       if (random)
+               *random = true;
+
        mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
-       dout("choose_mds chose random mds%d\n", mds);
+       dout("%s chose random mds%d\n", __func__, mds);
        return mds;
 }
 
@@ -1045,20 +1056,21 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
        return msg;
 }
 
+static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
+#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
 static void encode_supported_features(void **p, void *end)
 {
-       static const unsigned char bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
-       static const size_t count = ARRAY_SIZE(bits);
+       static const size_t count = ARRAY_SIZE(feature_bits);
 
        if (count > 0) {
                size_t i;
-               size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;
+               size_t size = FEATURE_BYTES(count);
 
                BUG_ON(*p + 4 + size > end);
                ceph_encode_32(p, size);
                memset(*p, 0, size);
                for (i = 0; i < count; i++)
-                       ((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
+                       ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
                *p += size;
        } else {
                BUG_ON(*p + 4 > end);
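
FEATURE_BYTES(c) turns the highest feature bit number in feature_bits[] into a byte count: enough 64-bit words to cover that bit, times 8 bytes per word. The same macro now sizes both the encoded blob here and the extra_bytes reservation in create_session_open_msg() below, replacing the fixed `4 + 8` that only left room for one word of feature bits. A quick runnable check of the arithmetic, with the macro rewritten to take the highest bit directly instead of indexing the table:

#include <assert.h>
#include <stddef.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* bytes needed for a bitmap whose highest set bit index is 'last' */
#define FEATURE_BYTES_FOR(last) (DIV_ROUND_UP((size_t)(last) + 1, 64) * 8)

int main(void)
{
        assert(FEATURE_BYTES_FOR(0)   == 8);    /* bits 0..63 fit in one word */
        assert(FEATURE_BYTES_FOR(63)  == 8);
        assert(FEATURE_BYTES_FOR(64)  == 16);   /* bit 64 needs a second word */
        assert(FEATURE_BYTES_FOR(127) == 16);
        return 0;
}
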
@@ -1079,6 +1091,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        int metadata_key_count = 0;
        struct ceph_options *opt = mdsc->fsc->client->options;
        struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
+       size_t size, count;
        void *p, *end;
 
        const char* metadata[][2] = {
@@ -1096,8 +1109,13 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
                        strlen(metadata[i][1]);
                metadata_key_count++;
        }
+
        /* supported feature */
-       extra_bytes += 4 + 8;
+       size = 0;
+       count = ARRAY_SIZE(feature_bits);
+       if (count > 0)
+               size = FEATURE_BYTES(count);
+       extra_bytes += 4 + size;
 
        /* Allocate the message */
        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
@@ -1117,7 +1135,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
         * Serialize client metadata into waiting buffer space, using
         * the format that userspace expects for map<string, string>
         *
-        * ClientSession messages with metadata are v2
+        * ClientSession messages with metadata are v3
         */
        msg->hdr.version = cpu_to_le16(3);
        msg->hdr.compat_version = cpu_to_le16(1);
@@ -1219,7 +1237,7 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
        struct ceph_mds_session *ts;
        int i, mds = session->s_mds;
 
-       if (mds >= mdsc->mdsmap->m_num_mds)
+       if (mds >= mdsc->mdsmap->possible_max_rank)
                return;
 
        mi = &mdsc->mdsmap->m_info[mds];
@@ -1967,7 +1985,7 @@ void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
        if (mdsc->stopping)
                return;
 
-       get_session(session);
+       ceph_get_mds_session(session);
        if (queue_work(mdsc->fsc->cap_wq,
                       &session->s_cap_release_work)) {
                dout("cap release work queued\n");
@@ -2072,7 +2090,6 @@ struct ceph_mds_request *
 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 {
        struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
-       struct timespec64 ts;
 
        if (!req)
                return ERR_PTR(-ENOMEM);
@@ -2091,8 +2108,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
        init_completion(&req->r_safe_completion);
        INIT_LIST_HEAD(&req->r_unsafe_item);
 
-       ktime_get_coarse_real_ts64(&ts);
-       req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);
+       ktime_get_coarse_real_ts64(&req->r_stamp);
 
        req->r_op = op;
        req->r_direct_mode = mode;
@@ -2517,6 +2533,26 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
        return 0;
 }
 
+/*
+ * called under mdsc->mutex
+ */
+static int __send_request(struct ceph_mds_client *mdsc,
+                         struct ceph_mds_session *session,
+                         struct ceph_mds_request *req,
+                         bool drop_cap_releases)
+{
+       int err;
+
+       err = __prepare_send_request(mdsc, req, session->s_mds,
+                                    drop_cap_releases);
+       if (!err) {
+               ceph_msg_get(req->r_request);
+               ceph_con_send(&session->s_con, req->r_request);
+       }
+
+       return err;
+}
+
 /*
  * send request, or put it on the appropriate wait list.
  */
@@ -2526,6 +2562,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
        struct ceph_mds_session *session = NULL;
        int mds = -1;
        int err = 0;
+       bool random;
 
        if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
@@ -2558,15 +2595,14 @@ static void __do_request(struct ceph_mds_client *mdsc,
                if (!(mdsc->fsc->mount_options->flags &
                      CEPH_MOUNT_OPT_MOUNTWAIT) &&
                    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
-                       err = -ENOENT;
-                       pr_info("probably no mds server is up\n");
+                       err = -EHOSTUNREACH;
                        goto finish;
                }
        }
 
        put_request_session(req);
 
-       mds = __choose_mds(mdsc, req);
+       mds = __choose_mds(mdsc, req, &random);
        if (mds < 0 ||
            ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
                dout("do_request no mds or not active, waiting for map\n");
@@ -2583,7 +2619,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
                        goto finish;
                }
        }
-       req->r_session = get_session(session);
+       req->r_session = ceph_get_mds_session(session);
 
        dout("do_request mds%d session %p state %s\n", mds, session,
             ceph_session_state_name(session->s_state));
@@ -2594,8 +2630,12 @@ static void __do_request(struct ceph_mds_client *mdsc,
                        goto out_session;
                }
                if (session->s_state == CEPH_MDS_SESSION_NEW ||
-                   session->s_state == CEPH_MDS_SESSION_CLOSING)
+                   session->s_state == CEPH_MDS_SESSION_CLOSING) {
                        __open_session(mdsc, session);
+                       /* retry the same mds later */
+                       if (random)
+                               req->r_resend_mds = mds;
+               }
                list_add(&req->r_wait, &session->s_waiting);
                goto out_session;
        }
@@ -2606,11 +2646,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
        if (req->r_request_started == 0)   /* note request start time */
                req->r_request_started = jiffies;
 
-       err = __prepare_send_request(mdsc, req, mds, false);
-       if (!err) {
-               ceph_msg_get(req->r_request);
-               ceph_con_send(&session->s_con, req->r_request);
-       }
+       err = __send_request(mdsc, session, req, false);
 
 out_session:
        ceph_put_mds_session(session);
@@ -2863,7 +2899,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                } else  {
-                       int mds = __choose_mds(mdsc, req);
+                       int mds = __choose_mds(mdsc, req, NULL);
                        if (mds >= 0 && mds != req->r_session->s_mds) {
                                dout("but auth changed, so resending\n");
                                __do_request(mdsc, req);
@@ -2879,6 +2915,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
                __unregister_request(mdsc, req);
 
+               /* last request during umount? */
+               if (mdsc->stopping && !__get_oldest_req(mdsc))
+                       complete_all(&mdsc->safe_umount_waiters);
+
                if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
                        /*
                         * We already handled the unsafe response, now do the
@@ -2889,9 +2929,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                         */
                        dout("got safe reply %llu, mds%d\n", tid, mds);
 
-                       /* last unsafe request during umount? */
-                       if (mdsc->stopping && !__get_oldest_req(mdsc))
-                               complete_all(&mdsc->safe_umount_waiters);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                }
@@ -3106,7 +3143,7 @@ static void handle_session(struct ceph_mds_session *session,
 
        mutex_lock(&mdsc->mutex);
        if (op == CEPH_SESSION_CLOSE) {
-               get_session(session);
+               ceph_get_mds_session(session);
                __unregister_session(mdsc, session);
        }
        /* FIXME: this ttl calculation is generous */
@@ -3144,6 +3181,7 @@ static void handle_session(struct ceph_mds_session *session,
        case CEPH_SESSION_CLOSE:
                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
                        pr_info("mds%d reconnect denied\n", session->s_mds);
+               session->s_state = CEPH_MDS_SESSION_CLOSED;
                cleanup_session_requests(mdsc, session);
                remove_session_caps(session);
                wake = 2; /* for good measure */
@@ -3211,7 +3249,6 @@ bad:
        return;
 }
 
-
 /*
  * called under session->mutex.
  */
@@ -3220,18 +3257,12 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 {
        struct ceph_mds_request *req, *nreq;
        struct rb_node *p;
-       int err;
 
        dout("replay_unsafe_requests mds%d\n", session->s_mds);
 
        mutex_lock(&mdsc->mutex);
-       list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
-               err = __prepare_send_request(mdsc, req, session->s_mds, true);
-               if (!err) {
-                       ceph_msg_get(req->r_request);
-                       ceph_con_send(&session->s_con, req->r_request);
-               }
-       }
+       list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
+               __send_request(mdsc, session, req, true);
 
        /*
         * also re-send old requests when MDS enters reconnect stage. So that MDS
@@ -3246,14 +3277,8 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
                if (req->r_attempts == 0)
                        continue; /* only old requests */
                if (req->r_session &&
-                   req->r_session->s_mds == session->s_mds) {
-                       err = __prepare_send_request(mdsc, req,
-                                                    session->s_mds, true);
-                       if (!err) {
-                               ceph_msg_get(req->r_request);
-                               ceph_con_send(&session->s_con, req->r_request);
-                       }
-               }
+                   req->r_session->s_mds == session->s_mds)
+                       __send_request(mdsc, session, req, true);
        }
        mutex_unlock(&mdsc->mutex);
 }
@@ -3764,7 +3789,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
        dout("check_new_map new %u old %u\n",
             newmap->m_epoch, oldmap->m_epoch);
 
-       for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
+       for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
                if (!mdsc->sessions[i])
                        continue;
                s = mdsc->sessions[i];
@@ -3778,9 +3803,9 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
                     ceph_session_state_name(s->s_state));
 
-               if (i >= newmap->m_num_mds) {
+               if (i >= newmap->possible_max_rank) {
                        /* force close session for stopped mds */
-                       get_session(s);
+                       ceph_get_mds_session(s);
                        __unregister_session(mdsc, s);
                        __wake_requests(mdsc, &s->s_waiting);
                        mutex_unlock(&mdsc->mutex);
@@ -3835,7 +3860,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                }
        }
 
-       for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
+       for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
                s = mdsc->sessions[i];
                if (!s)
                        continue;
@@ -4381,7 +4406,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i]) {
-                       session = get_session(mdsc->sessions[i]);
+                       session = ceph_get_mds_session(mdsc->sessions[i]);
                        __unregister_session(mdsc, session);
                        mutex_unlock(&mdsc->mutex);
                        mutex_lock(&session->s_mutex);
@@ -4609,11 +4634,8 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
-       if (get_session(s)) {
-               dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
+       if (ceph_get_mds_session(s))
                return con;
-       }
-       dout("mdsc con_get %p FAIL\n", s);
        return NULL;
 }
 
@@ -4621,7 +4643,6 @@ static void con_put(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
-       dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
        ceph_put_mds_session(s);
 }
 
index 14c7e8c..27a7446 100644 (file)
 #include <linux/ceph/auth.h>
 
 /* The first 8 bits are reserved for old ceph releases */
-#define CEPHFS_FEATURE_MIMIC           8
-#define CEPHFS_FEATURE_REPLY_ENCODING  9
-#define CEPHFS_FEATURE_RECLAIM_CLIENT  10
-#define CEPHFS_FEATURE_LAZY_CAP_WANTED 11
-#define CEPHFS_FEATURE_MULTI_RECONNECT  12
+enum ceph_feature_type {
+       CEPHFS_FEATURE_MIMIC = 8,
+       CEPHFS_FEATURE_REPLY_ENCODING,
+       CEPHFS_FEATURE_RECLAIM_CLIENT,
+       CEPHFS_FEATURE_LAZY_CAP_WANTED,
+       CEPHFS_FEATURE_MULTI_RECONNECT,
+
+       CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_MULTI_RECONNECT,
+};
 
-#define CEPHFS_FEATURES_CLIENT_SUPPORTED {     \
+/*
+ * This will always have the highest feature bit value
+ * as the last element of the array.
+ */
+#define CEPHFS_FEATURES_CLIENT_SUPPORTED {     \
        0, 1, 2, 3, 4, 5, 6, 7,                 \
        CEPHFS_FEATURE_MIMIC,                   \
        CEPHFS_FEATURE_REPLY_ENCODING,          \
        CEPHFS_FEATURE_LAZY_CAP_WANTED,         \
        CEPHFS_FEATURE_MULTI_RECONNECT,         \
+                                               \
+       CEPHFS_FEATURE_MAX,                     \
 }
 #define CEPHFS_FEATURES_CLIENT_REQUIRED {}
 
-
 /*
  * Some lock dependencies:
  *
@@ -151,7 +160,8 @@ enum {
        CEPH_MDS_SESSION_RESTARTING = 5,
        CEPH_MDS_SESSION_RECONNECTING = 6,
        CEPH_MDS_SESSION_CLOSING = 7,
-       CEPH_MDS_SESSION_REJECTED = 8,
+       CEPH_MDS_SESSION_CLOSED = 8,
+       CEPH_MDS_SESSION_REJECTED = 9,
 };
 
 struct ceph_mds_session {
@@ -174,6 +184,7 @@ struct ceph_mds_session {
 
        /* protected by s_cap_lock */
        spinlock_t        s_cap_lock;
+       refcount_t        s_ref;
        struct list_head  s_caps;     /* all caps issued by this session */
        struct ceph_cap  *s_cap_iterator;
        int               s_nr_caps;
@@ -188,7 +199,6 @@ struct ceph_mds_session {
        unsigned long     s_renew_requested; /* last time we sent a renew req */
        u64               s_renew_seq;
 
-       refcount_t        s_ref;
        struct list_head  s_waiting;  /* waiting requests */
        struct list_head  s_unsafe;   /* unsafe requests */
 };
@@ -224,6 +234,7 @@ struct ceph_mds_request {
        struct rb_node r_node;
        struct ceph_mds_client *r_mdsc;
 
+       struct kref       r_kref;
        int r_op;                    /* mds op code */
 
        /* operation on what? */
@@ -294,7 +305,6 @@ struct ceph_mds_request {
        int               r_resend_mds; /* mds to resend to next, if any*/
        u32               r_sent_on_mseq; /* cap mseq request was sent at*/
 
-       struct kref       r_kref;
        struct list_head  r_wait;
        struct completion r_completion;
        struct completion r_safe_completion;
@@ -451,15 +461,10 @@ extern const char *ceph_mds_op_name(int op);
 extern struct ceph_mds_session *
 __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
 
-static inline struct ceph_mds_session *
-ceph_get_mds_session(struct ceph_mds_session *s)
-{
-       refcount_inc(&s->s_ref);
-       return s;
-}
-
 extern const char *ceph_session_state_name(int s);
 
+extern struct ceph_mds_session *
+ceph_get_mds_session(struct ceph_mds_session *s);
 extern void ceph_put_mds_session(struct ceph_mds_session *s);
 
 extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
index 471bac3..8896278 100644 (file)
 
 #include "super.h"
 
+#define CEPH_MDS_IS_READY(i, ignore_laggy) \
+       (m->m_info[i].state > 0 && ignore_laggy ? true : !m->m_info[i].laggy)
 
-/*
- * choose a random mds that is "up" (i.e. has a state > 0), or -1.
- */
-int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
+static int __mdsmap_get_random_mds(struct ceph_mdsmap *m, bool ignore_laggy)
 {
        int n = 0;
        int i, j;
 
-       /* special case for one mds */
-       if (1 == m->m_num_mds && m->m_info[0].state > 0)
-               return 0;
-
        /* count */
-       for (i = 0; i < m->m_num_mds; i++)
-               if (m->m_info[i].state > 0)
+       for (i = 0; i < m->possible_max_rank; i++)
+               if (CEPH_MDS_IS_READY(i, ignore_laggy))
                        n++;
        if (n == 0)
                return -1;
 
        /* pick */
        n = prandom_u32() % n;
-       for (j = 0, i = 0; i < m->m_num_mds; i++) {
-               if (m->m_info[i].state > 0)
+       for (j = 0, i = 0; i < m->possible_max_rank; i++) {
+               if (CEPH_MDS_IS_READY(i, ignore_laggy))
                        j++;
                if (j > n)
                        break;
@@ -45,6 +40,20 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
        return i;
 }
 
+/*
+ * choose a random mds that is "up" (i.e. has a state > 0), or -1.
+ */
+int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
+{
+       int mds;
+
+       mds = __mdsmap_get_random_mds(m, false);
+       if (mds == m->possible_max_rank || mds == -1)
+               mds = __mdsmap_get_random_mds(m, true);
+
+       return mds == m->possible_max_rank ? -1 : mds;
+}
+
 #define __decode_and_drop_type(p, end, type, bad)              \
        do {                                                    \
                if (*p + sizeof(type) > end)                    \
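
For reference, the reworked ceph_mdsmap_get_random_mds() above first looks for
an up, non-laggy rank and only widens the search to laggy ranks when nothing
else is available. A compact user-space sketch of that two-pass intent (toy
mds_info array, rand() standing in for prandom_u32(); illustrative only, not
the kernel routine):

#include <stdio.h>
#include <stdlib.h>

struct mds_info { int state; int laggy; };	/* state > 0 means "up" */

static int pick_random_mds(const struct mds_info *info, int max_rank, int ignore_laggy)
{
	int n = 0, i, j;

	/* first pass: count the candidates */
	for (i = 0; i < max_rank; i++)
		if (info[i].state > 0 && (ignore_laggy || !info[i].laggy))
			n++;
	if (n == 0)
		return -1;

	/* second pass: walk to a randomly chosen candidate */
	n = rand() % n;
	for (j = 0, i = 0; i < max_rank; i++) {
		if (info[i].state > 0 && (ignore_laggy || !info[i].laggy))
			j++;
		if (j > n)
			break;
	}
	return i;
}

int main(void)
{
	struct mds_info map[] = { { 1, 1 }, { 1, 0 }, { 0, 0 } };
	int mds;

	/* prefer a non-laggy rank, fall back to any up rank */
	mds = pick_random_mds(map, 3, 0);
	if (mds == -1)
		mds = pick_random_mds(map, 3, 1);
	printf("chose mds%d\n", mds);
	return 0;
}
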
@@ -138,14 +147,29 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
        m->m_session_autoclose = ceph_decode_32(p);
        m->m_max_file_size = ceph_decode_64(p);
        m->m_max_mds = ceph_decode_32(p);
-       m->m_num_mds = m->m_max_mds;
 
-       m->m_info = kcalloc(m->m_num_mds, sizeof(*m->m_info), GFP_NOFS);
+       /*
+        * pick out the active nodes as m_num_active_mds; m_num_active_mds
+        * may be larger than m_max_mds when max_mds is being decreased on
+        * the cluster side, otherwise it should be less than or equal to
+        * m_max_mds.
+        */
+       m->m_num_active_mds = n = ceph_decode_32(p);
+
+       /*
+        * the possible max rank, which may be larger than m_num_active_mds:
+        * for example, if max_mds == 2 in the cluster and MDS(0) is laggy
+        * and being replaced by a new MDS, we will temporarily receive a
+        * new mds map with m_num_active_mds == 1 and only the active MDS(1),
+        * so an mds rank can be >= m_num_active_mds.
+        */
+       m->possible_max_rank = max(m->m_num_active_mds, m->m_max_mds);
+
+       m->m_info = kcalloc(m->possible_max_rank, sizeof(*m->m_info), GFP_NOFS);
        if (!m->m_info)
                goto nomem;
 
        /* pick out active nodes from mds_info (state > 0) */
-       n = ceph_decode_32(p);
        for (i = 0; i < n; i++) {
                u64 global_id;
                u32 namelen;
@@ -215,18 +239,15 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                     ceph_mds_state_name(state),
                     laggy ? "(laggy)" : "");
 
-               if (mds < 0 || state <= 0)
+               if (mds < 0 || mds >= m->possible_max_rank) {
+                       pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
                        continue;
+               }
 
-               if (mds >= m->m_num_mds) {
-                       int new_num = max(mds + 1, m->m_num_mds * 2);
-                       void *new_m_info = krealloc(m->m_info,
-                                               new_num * sizeof(*m->m_info),
-                                               GFP_NOFS | __GFP_ZERO);
-                       if (!new_m_info)
-                               goto nomem;
-                       m->m_info = new_m_info;
-                       m->m_num_mds = new_num;
+               if (state <= 0) {
+                       pr_warn("mdsmap_decode got incorrect state(%s)\n",
+                               ceph_mds_state_name(state));
+                       continue;
                }
 
                info = &m->m_info[mds];
@@ -247,14 +268,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                        info->export_targets = NULL;
                }
        }
-       if (m->m_num_mds > m->m_max_mds) {
-               /* find max up mds */
-               for (i = m->m_num_mds; i >= m->m_max_mds; i--) {
-                       if (i == 0 || m->m_info[i-1].state > 0)
-                               break;
-               }
-               m->m_num_mds = i;
-       }
 
        /* pg_pools */
        ceph_decode_32_safe(p, end, n, bad);
@@ -296,14 +309,14 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 
                for (i = 0; i < n; i++) {
                        s32 mds = ceph_decode_32(p);
-                       if (mds >= 0 && mds < m->m_num_mds) {
+                       if (mds >= 0 && mds < m->possible_max_rank) {
                                if (m->m_info[mds].laggy)
                                        num_laggy++;
                        }
                }
                m->m_num_laggy = num_laggy;
 
-               if (n > m->m_num_mds) {
+               if (n > m->possible_max_rank) {
                        void *new_m_info = krealloc(m->m_info,
                                                    n * sizeof(*m->m_info),
                                                    GFP_NOFS | __GFP_ZERO);
@@ -311,7 +324,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                                goto nomem;
                        m->m_info = new_m_info;
                }
-               m->m_num_mds = n;
+               m->possible_max_rank = n;
        }
 
        /* inc */
@@ -382,7 +395,7 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
 {
        int i;
 
-       for (i = 0; i < m->m_num_mds; i++)
+       for (i = 0; i < m->possible_max_rank; i++)
                kfree(m->m_info[i].export_targets);
        kfree(m->m_info);
        kfree(m->m_data_pg_pools);
@@ -396,9 +409,9 @@ bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m)
                return false;
        if (m->m_damaged)
                return false;
-       if (m->m_num_laggy > 0)
+       if (m->m_num_laggy == m->m_num_active_mds)
                return false;
-       for (i = 0; i < m->m_num_mds; i++) {
+       for (i = 0; i < m->possible_max_rank; i++) {
                if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE)
                        nr_active++;
        }
index 29a795f..bfb8aea 100644 (file)
@@ -107,7 +107,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
-
 static int ceph_sync_fs(struct super_block *sb, int wait)
 {
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
@@ -211,7 +210,6 @@ struct ceph_parse_opts_ctx {
 
 /*
  * Parse the source parameter.  Distinguish the server list from the path.
- * Internally we do not include the leading '/' in the path.
  *
  * The source will look like:
  *     <server_spec>[,<server_spec>...]:[<path>]
@@ -232,12 +230,15 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
 
        dev_name_end = strchr(dev_name, '/');
        if (dev_name_end) {
-               if (strlen(dev_name_end) > 1) {
-                       kfree(fsopt->server_path);
-                       fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
-                       if (!fsopt->server_path)
-                               return -ENOMEM;
-               }
+               kfree(fsopt->server_path);
+
+               /*
+                * The server_path will include the whole string passed in
+                * from userland, including the leading '/'.
+                */
+               fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+               if (!fsopt->server_path)
+                       return -ENOMEM;
        } else {
                dev_name_end = dev_name + strlen(dev_name);
        }
@@ -461,6 +462,73 @@ static int strcmp_null(const char *s1, const char *s2)
        return strcmp(s1, s2);
 }
 
+/**
+ * path_remove_extra_slash - Remove the extra slashes in the server path
+ * @server_path: the server path, which may be NULL
+ *
+ * Return NULL if the path is NULL or consists only of "/", otherwise
+ * return a string with the extra slashes removed, including the leading
+ * slash(es) and any slash(es) at the end of the server path, such as:
+ * "//dir1////dir2///" --> "dir1/dir2"
+ */
+static char *path_remove_extra_slash(const char *server_path)
+{
+       const char *path = server_path;
+       const char *cur, *end;
+       char *buf, *p;
+       int len;
+
+       /* if the server path is omitted */
+       if (!path)
+               return NULL;
+
+       /* remove all the leading slashes */
+       while (*path == '/')
+               path++;
+
+       /* if the server path only consists of slashes */
+       if (*path == '\0')
+               return NULL;
+
+       len = strlen(path);
+
+       buf = kmalloc(len + 1, GFP_KERNEL);
+       if (!buf)
+               return ERR_PTR(-ENOMEM);
+
+       end = path + len;
+       p = buf;
+       do {
+               cur = strchr(path, '/');
+               if (!cur)
+                       cur = end;
+
+               len = cur - path;
+
+               /* including one '/' */
+               if (cur != end)
+                       len += 1;
+
+               memcpy(p, path, len);
+               p += len;
+
+               while (cur <= end && *cur == '/')
+                       cur++;
+               path = cur;
+       } while (path < end);
+
+       *p = '\0';
+
+       /*
+        * remove the last slash if there is one, just to make sure that
+        * we end up with something like "dir1/dir2"
+        */
+       if (*(--p) == '/')
+               *p = '\0';
+
+       return buf;
+}
+
 static int compare_mount_options(struct ceph_mount_options *new_fsopt,
                                 struct ceph_options *new_opt,
                                 struct ceph_fs_client *fsc)
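
For reference, path_remove_extra_slash() is used both here, so that mounts of
"//dir1////dir2///" and "/dir1/dir2" compare as the same server path, and in
ceph_real_mount() below before opening the root dentry. A small user-space
approximation of the normalization, with malloc/free standing in for
kmalloc/kfree and the error handling simplified (illustrative only, not the
kernel routine):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* collapse duplicate '/' and strip leading/trailing slashes; NULL if nothing is left */
static char *normalize_server_path(const char *path)
{
	char *buf, *p;

	if (!path)
		return NULL;
	while (*path == '/')
		path++;
	if (*path == '\0')
		return NULL;

	buf = malloc(strlen(path) + 1);
	if (!buf)
		return NULL;

	for (p = buf; *path; path++) {
		if (*path == '/' && (path[1] == '/' || path[1] == '\0'))
			continue;	/* skip duplicate and trailing slashes */
		*p++ = *path;
	}
	*p = '\0';
	return buf;
}

int main(void)
{
	const char *samples[] = { "//dir1////dir2///", "/", NULL, "dir1/dir2" };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		char *out = normalize_server_path(samples[i]);

		printf("'%s' -> '%s'\n", samples[i] ? samples[i] : "(null)",
		       out ? out : "(null)");
		free(out);
	}
	return 0;
}
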
@@ -468,6 +536,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
        struct ceph_mount_options *fsopt1 = new_fsopt;
        struct ceph_mount_options *fsopt2 = fsc->mount_options;
        int ofs = offsetof(struct ceph_mount_options, snapdir_name);
+       char *p1, *p2;
        int ret;
 
        ret = memcmp(fsopt1, fsopt2, ofs);
@@ -480,9 +549,21 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
        ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
        if (ret)
                return ret;
-       ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
+
+       p1 = path_remove_extra_slash(fsopt1->server_path);
+       if (IS_ERR(p1))
+               return PTR_ERR(p1);
+       p2 = path_remove_extra_slash(fsopt2->server_path);
+       if (IS_ERR(p2)) {
+               kfree(p1);
+               return PTR_ERR(p2);
+       }
+       ret = strcmp_null(p1, p2);
+       kfree(p1);
+       kfree(p2);
        if (ret)
                return ret;
+
        ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
        if (ret)
                return ret;
@@ -637,6 +718,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
        fsc->sb = NULL;
        fsc->mount_state = CEPH_MOUNT_MOUNTING;
        fsc->filp_gen = 1;
+       fsc->have_copy_from2 = true;
 
        atomic_long_set(&fsc->writeback_count, 0);
 
@@ -788,7 +870,6 @@ static void destroy_caches(void)
        ceph_fscache_unregister();
 }
 
-
 /*
  * ceph_umount_begin - initiate forced umount.  Tear down the
  * mount, skipping steps that may hang while waiting for server(s).
@@ -868,9 +949,6 @@ out:
        return root;
 }
 
-
-
-
 /*
  * mount: join the ceph cluster, and open root directory.
  */
@@ -885,7 +963,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
        mutex_lock(&fsc->client->mount_mutex);
 
        if (!fsc->sb->s_root) {
-               const char *path;
+               const char *path, *p;
                err = __ceph_open_session(fsc->client, started);
                if (err < 0)
                        goto out;
@@ -897,17 +975,22 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
                                goto out;
                }
 
-               if (!fsc->mount_options->server_path) {
-                       path = "";
-                       dout("mount opening path \\t\n");
-               } else {
-                       path = fsc->mount_options->server_path + 1;
-                       dout("mount opening path %s\n", path);
+               p = path_remove_extra_slash(fsc->mount_options->server_path);
+               if (IS_ERR(p)) {
+                       err = PTR_ERR(p);
+                       goto out;
                }
+               /* if the server path is omitted or just consists of '/' */
+               if (!p)
+                       path = "";
+               else
+                       path = p;
+               dout("mount opening path '%s'\n", path);
 
                ceph_fs_debugfs_init(fsc);
 
                root = open_root_dentry(fsc, path, started);
+               kfree(p);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
                        goto out;
@@ -1070,6 +1153,11 @@ static int ceph_get_tree(struct fs_context *fc)
        return 0;
 
 out_splat:
+       if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
+               pr_info("No mds server is up or the cluster is laggy\n");
+               err = -EHOSTUNREACH;
+       }
+
        ceph_mdsc_close_sessions(fsc->mdsc);
        deactivate_locked_super(sb);
        goto out_final;
index 3bf1a01..1e456a9 100644 (file)
@@ -106,6 +106,8 @@ struct ceph_fs_client {
        unsigned long last_auto_reconnect;
        bool blacklisted;
 
+       bool have_copy_from2;
+
        u32 filp_gen;
        loff_t max_file_size;
 
diff --git a/fs/ceph/util.c b/fs/ceph/util.c
new file mode 100644 (file)
index 0000000..2c34875
--- /dev/null
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Some non-inline ceph helpers
+ */
+#include <linux/module.h>
+#include <linux/ceph/types.h>
+
+/*
+ * return true if @layout appears to be valid
+ */
+int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
+{
+       __u32 su = layout->stripe_unit;
+       __u32 sc = layout->stripe_count;
+       __u32 os = layout->object_size;
+
+       /* stripe unit, object size must be non-zero, 64k increment */
+       if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
+               return 0;
+       if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
+               return 0;
+       /* object size must be a multiple of stripe unit */
+       if (os < su || os % su)
+               return 0;
+       /* stripe count must be non-zero */
+       if (!sc)
+               return 0;
+       return 1;
+}
+
+void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+                                 struct ceph_file_layout_legacy *legacy)
+{
+       fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit);
+       fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
+       fl->object_size = le32_to_cpu(legacy->fl_object_size);
+       fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
+       if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+           fl->stripe_count == 0 && fl->object_size == 0)
+               fl->pool_id = -1;
+}
+
+void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
+                               struct ceph_file_layout_legacy *legacy)
+{
+       legacy->fl_stripe_unit = cpu_to_le32(fl->stripe_unit);
+       legacy->fl_stripe_count = cpu_to_le32(fl->stripe_count);
+       legacy->fl_object_size = cpu_to_le32(fl->object_size);
+       if (fl->pool_id >= 0)
+               legacy->fl_pg_pool = cpu_to_le32(fl->pool_id);
+       else
+               legacy->fl_pg_pool = 0;
+}
+
+int ceph_flags_to_mode(int flags)
+{
+       int mode;
+
+#ifdef O_DIRECTORY  /* fixme */
+       if ((flags & O_DIRECTORY) == O_DIRECTORY)
+               return CEPH_FILE_MODE_PIN;
+#endif
+
+       switch (flags & O_ACCMODE) {
+       case O_WRONLY:
+               mode = CEPH_FILE_MODE_WR;
+               break;
+       case O_RDONLY:
+               mode = CEPH_FILE_MODE_RD;
+               break;
+       case O_RDWR:
+       case O_ACCMODE: /* this is what the VFS does */
+               mode = CEPH_FILE_MODE_RDWR;
+               break;
+       }
+#ifdef O_LAZY
+       if (flags & O_LAZY)
+               mode |= CEPH_FILE_MODE_LAZY;
+#endif
+
+       return mode;
+}
+
+int ceph_caps_for_mode(int mode)
+{
+       int caps = CEPH_CAP_PIN;
+
+       if (mode & CEPH_FILE_MODE_RD)
+               caps |= CEPH_CAP_FILE_SHARED |
+                       CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
+       if (mode & CEPH_FILE_MODE_WR)
+               caps |= CEPH_CAP_FILE_EXCL |
+                       CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
+                       CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
+                       CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
+       if (mode & CEPH_FILE_MODE_LAZY)
+               caps |= CEPH_CAP_FILE_LAZYIO;
+
+       return caps;
+}
index cb18ee6..7b8a070 100644 (file)
@@ -655,7 +655,7 @@ static int __build_xattrs(struct inode *inode)
        u32 len;
        const char *name, *val;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       int xattr_version;
+       u64 xattr_version;
        struct ceph_inode_xattr **xattrs = NULL;
        int err = 0;
        int i;
@@ -851,7 +851,7 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
        req_mask = __get_request_mask(inode);
 
        spin_lock(&ci->i_ceph_lock);
-       dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
+       dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
        if (ci->i_xattrs.version == 0 ||
@@ -1078,7 +1078,8 @@ retry:
                }
        }
 
-       dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
+       dout("setxattr %p name '%s' issued %s\n", inode, name,
+            ceph_cap_string(issued));
        __build_xattrs(inode);
 
        required_blob_size = __get_required_blob_size(ci, name_len, val_len);
index 19f6e59..276e4b5 100644 (file)
@@ -611,12 +611,12 @@ static int cifs_stats_proc_open(struct inode *inode, struct file *file)
        return single_open(file, cifs_stats_proc_show, NULL);
 }
 
-static const struct file_operations cifs_stats_proc_fops = {
-       .open           = cifs_stats_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifs_stats_proc_write,
+static const struct proc_ops cifs_stats_proc_ops = {
+       .proc_open      = cifs_stats_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = cifs_stats_proc_write,
 };
 
 #ifdef CONFIG_CIFS_SMB_DIRECT
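
The cifs hunks in this file are part of the tree-wide conversion of /proc
interfaces from struct file_operations to the dedicated struct proc_ops: the
seq_file helpers and the existing open/write callbacks stay the same, only the
registration structure and its proc_-prefixed field names change. A minimal
sketch of the resulting pattern as a standalone module, using a hypothetical
"foo_stats" entry (the name and placement are assumptions for illustration):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_stats_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int foo_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_stats_show, NULL);
}

static const struct proc_ops foo_stats_proc_ops = {
	.proc_open	= foo_stats_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init foo_init(void)
{
	proc_create("foo_stats", 0444, NULL, &foo_stats_proc_ops);
	return 0;
}

static void __exit foo_exit(void)
{
	remove_proc_entry("foo_stats", NULL);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
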
@@ -640,12 +640,12 @@ static int name##_open(struct inode *inode, struct file *file) \
        return single_open(file, name##_proc_show, NULL); \
 } \
 \
-static const struct file_operations cifs_##name##_proc_fops = { \
-       .open           = name##_open, \
-       .read           = seq_read, \
-       .llseek         = seq_lseek, \
-       .release        = single_release, \
-       .write          = name##_write, \
+static const struct proc_ops cifs_##name##_proc_fops = { \
+       .proc_open      = name##_open, \
+       .proc_read      = seq_read, \
+       .proc_lseek     = seq_lseek, \
+       .proc_release   = single_release, \
+       .proc_write     = name##_write, \
 }
 
 PROC_FILE_DEFINE(rdma_readwrite_threshold);
@@ -659,11 +659,11 @@ PROC_FILE_DEFINE(smbd_receive_credit_max);
 #endif
 
 static struct proc_dir_entry *proc_fs_cifs;
-static const struct file_operations cifsFYI_proc_fops;
-static const struct file_operations cifs_lookup_cache_proc_fops;
-static const struct file_operations traceSMB_proc_fops;
-static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_linux_ext_proc_fops;
+static const struct proc_ops cifsFYI_proc_ops;
+static const struct proc_ops cifs_lookup_cache_proc_ops;
+static const struct proc_ops traceSMB_proc_ops;
+static const struct proc_ops cifs_security_flags_proc_ops;
+static const struct proc_ops cifs_linux_ext_proc_ops;
 
 void
 cifs_proc_init(void)
@@ -678,18 +678,18 @@ cifs_proc_init(void)
        proc_create_single("open_files", 0400, proc_fs_cifs,
                        cifs_debug_files_proc_show);
 
-       proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops);
-       proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops);
-       proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops);
+       proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_ops);
+       proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_ops);
+       proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_ops);
        proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs,
-                   &cifs_linux_ext_proc_fops);
+                   &cifs_linux_ext_proc_ops);
        proc_create("SecurityFlags", 0644, proc_fs_cifs,
-                   &cifs_security_flags_proc_fops);
+                   &cifs_security_flags_proc_ops);
        proc_create("LookupCacheEnabled", 0644, proc_fs_cifs,
-                   &cifs_lookup_cache_proc_fops);
+                   &cifs_lookup_cache_proc_ops);
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
-       proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_fops);
+       proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_ops);
 #endif
 
 #ifdef CONFIG_CIFS_SMB_DIRECT
@@ -774,12 +774,12 @@ static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
        return count;
 }
 
-static const struct file_operations cifsFYI_proc_fops = {
-       .open           = cifsFYI_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifsFYI_proc_write,
+static const struct proc_ops cifsFYI_proc_ops = {
+       .proc_open      = cifsFYI_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = cifsFYI_proc_write,
 };
 
 static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
@@ -805,12 +805,12 @@ static ssize_t cifs_linux_ext_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations cifs_linux_ext_proc_fops = {
-       .open           = cifs_linux_ext_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifs_linux_ext_proc_write,
+static const struct proc_ops cifs_linux_ext_proc_ops = {
+       .proc_open      = cifs_linux_ext_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = cifs_linux_ext_proc_write,
 };
 
 static int cifs_lookup_cache_proc_show(struct seq_file *m, void *v)
@@ -836,12 +836,12 @@ static ssize_t cifs_lookup_cache_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations cifs_lookup_cache_proc_fops = {
-       .open           = cifs_lookup_cache_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifs_lookup_cache_proc_write,
+static const struct proc_ops cifs_lookup_cache_proc_ops = {
+       .proc_open      = cifs_lookup_cache_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = cifs_lookup_cache_proc_write,
 };
 
 static int traceSMB_proc_show(struct seq_file *m, void *v)
@@ -867,12 +867,12 @@ static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
        return count;
 }
 
-static const struct file_operations traceSMB_proc_fops = {
-       .open           = traceSMB_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = traceSMB_proc_write,
+static const struct proc_ops traceSMB_proc_ops = {
+       .proc_open      = traceSMB_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = traceSMB_proc_write,
 };
 
 static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
@@ -978,12 +978,12 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations cifs_security_flags_proc_fops = {
-       .open           = cifs_security_flags_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifs_security_flags_proc_write,
+static const struct proc_ops cifs_security_flags_proc_ops = {
+       .proc_open      = cifs_security_flags_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = cifs_security_flags_proc_write,
 };
 #else
 inline void cifs_proc_init(void)
index 9a384d1..43c1b43 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/jhash.h>
 #include <linux/ktime.h>
 #include <linux/slab.h>
+#include <linux/proc_fs.h>
 #include <linux/nls.h>
 #include <linux/workqueue.h>
 #include "cifsglob.h"
@@ -211,12 +212,12 @@ static int dfscache_proc_open(struct inode *inode, struct file *file)
        return single_open(file, dfscache_proc_show, NULL);
 }
 
-const struct file_operations dfscache_proc_fops = {
-       .open           = dfscache_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = dfscache_proc_write,
+const struct proc_ops dfscache_proc_ops = {
+       .proc_open      = dfscache_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = dfscache_proc_write,
 };
 
 #ifdef CONFIG_CIFS_DEBUG2
index 76c7329..99ee44f 100644 (file)
@@ -24,7 +24,7 @@ struct dfs_cache_tgt_iterator {
 
 extern int dfs_cache_init(void);
 extern void dfs_cache_destroy(void);
-extern const struct file_operations dfscache_proc_fops;
+extern const struct proc_ops dfscache_proc_ops;
 
 extern int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
                          const struct nls_table *nls_codepage, int remap,
index 9b547f7..676e96a 100644 (file)
@@ -113,6 +113,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
        }
 
         /* revalidate if mtime or size have changed */
+       fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
        if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
            cifs_i->server_eof == fattr->cf_eof) {
                cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
@@ -162,6 +163,9 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
        cifs_revalidate_cache(inode, fattr);
 
        spin_lock(&inode->i_lock);
+       fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
+       fattr->cf_atime = timestamp_truncate(fattr->cf_atime, inode);
+       fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
        /* we do not want atime to be less than mtime, it broke some apps */
        if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
                inode->i_atime = fattr->cf_mtime;
@@ -329,8 +333,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
        fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
        fattr->cf_uid = cifs_sb->mnt_uid;
        fattr->cf_gid = cifs_sb->mnt_gid;
-       ktime_get_real_ts64(&fattr->cf_mtime);
-       fattr->cf_mtime = timespec64_trunc(fattr->cf_mtime, sb->s_time_gran);
+       ktime_get_coarse_real_ts64(&fattr->cf_mtime);
        fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
        fattr->cf_nlink = 2;
        fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL;
@@ -609,10 +612,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
 
        if (info->LastAccessTime)
                fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
-       else {
-               ktime_get_real_ts64(&fattr->cf_atime);
-               fattr->cf_atime = timespec64_trunc(fattr->cf_atime, sb->s_time_gran);
-       }
+       else
+               ktime_get_coarse_real_ts64(&fattr->cf_atime);
 
        fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
        fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
index 680aba9..fd0b5dd 100644 (file)
@@ -76,14 +76,11 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
        if (ia_valid & ATTR_GID)
                sd_iattr->ia_gid = iattr->ia_gid;
        if (ia_valid & ATTR_ATIME)
-               sd_iattr->ia_atime = timestamp_truncate(iattr->ia_atime,
-                                                     inode);
+               sd_iattr->ia_atime = iattr->ia_atime;
        if (ia_valid & ATTR_MTIME)
-               sd_iattr->ia_mtime = timestamp_truncate(iattr->ia_mtime,
-                                                     inode);
+               sd_iattr->ia_mtime = iattr->ia_mtime;
        if (ia_valid & ATTR_CTIME)
-               sd_iattr->ia_ctime = timestamp_truncate(iattr->ia_ctime,
-                                                     inode);
+               sd_iattr->ia_ctime = iattr->ia_ctime;
        if (ia_valid & ATTR_MODE) {
                umode_t mode = iattr->ia_mode;
 
index dc6cffc..e742dfc 100644 (file)
@@ -332,7 +332,10 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
                parent = debugfs_mount->mnt_root;
 
        inode_lock(d_inode(parent));
-       dentry = lookup_one_len(name, parent, strlen(name));
+       if (unlikely(IS_DEADDIR(d_inode(parent))))
+               dentry = ERR_PTR(-ENOENT);
+       else
+               dentry = lookup_one_len(name, parent, strlen(name));
        if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
                if (d_is_dir(dentry))
                        pr_err("Directory '%s' with parent '%s' already present!\n",
@@ -681,62 +684,15 @@ static void __debugfs_file_removed(struct dentry *dentry)
                wait_for_completion(&fsd->active_users_drained);
 }
 
-static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
-{
-       int ret = 0;
-
-       if (simple_positive(dentry)) {
-               dget(dentry);
-               if (d_is_dir(dentry)) {
-                       ret = simple_rmdir(d_inode(parent), dentry);
-                       if (!ret)
-                               fsnotify_rmdir(d_inode(parent), dentry);
-               } else {
-                       simple_unlink(d_inode(parent), dentry);
-                       fsnotify_unlink(d_inode(parent), dentry);
-               }
-               if (!ret)
-                       d_delete(dentry);
-               if (d_is_reg(dentry))
-                       __debugfs_file_removed(dentry);
-               dput(dentry);
-       }
-       return ret;
-}
-
-/**
- * debugfs_remove - removes a file or directory from the debugfs filesystem
- * @dentry: a pointer to a the dentry of the file or directory to be
- *          removed.  If this parameter is NULL or an error value, nothing
- *          will be done.
- *
- * This function removes a file or directory in debugfs that was previously
- * created with a call to another debugfs function (like
- * debugfs_create_file() or variants thereof.)
- *
- * This function is required to be called in order for the file to be
- * removed, no automatic cleanup of files will happen when a module is
- * removed, you are responsible here.
- */
-void debugfs_remove(struct dentry *dentry)
+static void remove_one(struct dentry *victim)
 {
-       struct dentry *parent;
-       int ret;
-
-       if (IS_ERR_OR_NULL(dentry))
-               return;
-
-       parent = dentry->d_parent;
-       inode_lock(d_inode(parent));
-       ret = __debugfs_remove(dentry, parent);
-       inode_unlock(d_inode(parent));
-       if (!ret)
-               simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+        if (d_is_reg(victim))
+               __debugfs_file_removed(victim);
+       simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 }
-EXPORT_SYMBOL_GPL(debugfs_remove);
 
 /**
- * debugfs_remove_recursive - recursively removes a directory
+ * debugfs_remove - recursively removes a directory
  * @dentry: a pointer to a the dentry of the directory to be removed.  If this
  *          parameter is NULL or an error value, nothing will be done.
  *
@@ -748,65 +704,16 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
  * removed, no automatic cleanup of files will happen when a module is
  * removed, you are responsible here.
  */
-void debugfs_remove_recursive(struct dentry *dentry)
+void debugfs_remove(struct dentry *dentry)
 {
-       struct dentry *child, *parent;
-
        if (IS_ERR_OR_NULL(dentry))
                return;
 
-       parent = dentry;
- down:
-       inode_lock(d_inode(parent));
- loop:
-       /*
-        * The parent->d_subdirs is protected by the d_lock. Outside that
-        * lock, the child can be unlinked and set to be freed which can
-        * use the d_u.d_child as the rcu head and corrupt this list.
-        */
-       spin_lock(&parent->d_lock);
-       list_for_each_entry(child, &parent->d_subdirs, d_child) {
-               if (!simple_positive(child))
-                       continue;
-
-               /* perhaps simple_empty(child) makes more sense */
-               if (!list_empty(&child->d_subdirs)) {
-                       spin_unlock(&parent->d_lock);
-                       inode_unlock(d_inode(parent));
-                       parent = child;
-                       goto down;
-               }
-
-               spin_unlock(&parent->d_lock);
-
-               if (!__debugfs_remove(child, parent))
-                       simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-
-               /*
-                * The parent->d_lock protects agaist child from unlinking
-                * from d_subdirs. When releasing the parent->d_lock we can
-                * no longer trust that the next pointer is valid.
-                * Restart the loop. We'll skip this one with the
-                * simple_positive() check.
-                */
-               goto loop;
-       }
-       spin_unlock(&parent->d_lock);
-
-       inode_unlock(d_inode(parent));
-       child = parent;
-       parent = parent->d_parent;
-       inode_lock(d_inode(parent));
-
-       if (child != dentry)
-               /* go up */
-               goto loop;
-
-       if (!__debugfs_remove(child, parent))
-               simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-       inode_unlock(d_inode(parent));
+       simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count);
+       simple_recursive_removal(dentry, remove_one);
+       simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 }
-EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
+EXPORT_SYMBOL_GPL(debugfs_remove);
 
 /**
  * debugfs_rename - rename a file/directory in the debugfs filesystem
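
Here debugfs_remove() and debugfs_remove_recursive() collapse into a single
implementation built on the new simple_recursive_removal() helper, which walks
the subtree, removes children before their parent, and calls the supplied
callback for each removed dentry (remove_one() above releases the pinned
filesystem and handles regular files). A toy user-space illustration of that
post-order, callback-per-node shape only; the real helper also takes care of
the VFS locking and notification details:

#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for a small dentry tree */
struct node {
	const char *name;
	struct node *child[4];
	int nchild;
};

static void remove_one(struct node *victim)
{
	printf("removed %s\n", victim->name);
}

/* children are always removed before their parent */
static void recursive_removal(struct node *n, void (*callback)(struct node *))
{
	for (int i = 0; i < n->nchild; i++)
		recursive_removal(n->child[i], callback);
	callback(n);
	free(n);
}

static struct node *mknode(const char *name)
{
	struct node *n = calloc(1, sizeof(*n));

	n->name = name;
	return n;
}

int main(void)
{
	struct node *root = mknode("dir");

	root->child[root->nchild++] = mknode("file-a");
	root->child[root->nchild++] = mknode("file-b");
	recursive_removal(root, remove_one);
	return 0;
}
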
index 8aa0ea8..78e41c7 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/seq_file.h>
 #include <linux/idr.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -60,12 +62,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
        unsigned long flags;
 
+       /*
+        * Deadlock or stack overflow issues can happen if we recurse here
+        * through waitqueue wakeup handlers. If the caller uses potentially
+        * nested waitqueues with custom wakeup handlers, then it should
+        * check eventfd_signal_count() before calling this function. If
+        * it returns true, the eventfd_signal() call should be deferred to a
+        * safe context.
+        */
+       if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+               return 0;
+
        spin_lock_irqsave(&ctx->wqh.lock, flags);
+       this_cpu_inc(eventfd_wake_count);
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+       this_cpu_dec(eventfd_wake_count);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return n;
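
The per-CPU eventfd_wake_count added above is what the eventfd_signal_count()
check mentioned in the comment reads; a caller that may already be running
inside an eventfd wakeup is expected to test it and defer the signal instead
of recursing. A hedged sketch of that caller-side pattern; the work-item
deferral and the notifier type are assumptions for illustration, not an
existing API:

#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* illustrative only: a caller that may be invoked from a wakeup path */
struct notifier {
	struct eventfd_ctx *ctx;
	struct work_struct work;	/* set up with INIT_WORK(&n->work, deferred_signal) */
};

static void deferred_signal(struct work_struct *work)
{
	struct notifier *n = container_of(work, struct notifier, work);

	eventfd_signal(n->ctx, 1);
}

static void notify(struct notifier *n)
{
	if (eventfd_signal_count())
		schedule_work(&n->work);	/* defer to a safe context */
	else
		eventfd_signal(n->ctx, 1);
}
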
index 88b213b..8434217 100644 (file)
@@ -6043,7 +6043,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
                bh = ext4_bread(handle, inode, blk,
                                EXT4_GET_BLOCKS_CREATE |
                                EXT4_GET_BLOCKS_METADATA_NOFAIL);
-       } while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
+       } while (PTR_ERR(bh) == -ENOSPC &&
                 ext4_should_retry_alloc(inode->i_sb, &retries));
        if (IS_ERR(bh))
                return PTR_ERR(bh);
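
The IS_ERR() test dropped here (and in the f2fs hunk that follows) was
redundant: PTR_ERR() is just a cast of the pointer value, and a pointer that
is not in the error range can never compare equal to a specific negative errno
such as -ENOSPC. A tiny user-space sketch of the idiom, with local stand-ins
for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers:

#include <errno.h>
#include <stdio.h>

/* local stand-ins for the kernel's err.h helpers */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int data = 0;
	void *ok = &data;
	void *nospc = ERR_PTR(-ENOSPC);

	/* a real pointer never equals a specific errno value, so IS_ERR() adds nothing here */
	printf("ok:    PTR_ERR == -ENOSPC? %d, IS_ERR? %d\n",
	       PTR_ERR(ok) == -ENOSPC, IS_ERR(ok));
	printf("nospc: PTR_ERR == -ENOSPC? %d, IS_ERR? %d\n",
	       PTR_ERR(nospc) == -ENOSPC, IS_ERR(nospc));
	return 0;
}
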
index 86ddbb5..0d4da64 100644 (file)
@@ -829,18 +829,12 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
                inode->i_uid = attr->ia_uid;
        if (ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
-       if (ia_valid & ATTR_ATIME) {
-               inode->i_atime = timestamp_truncate(attr->ia_atime,
-                                                 inode);
-       }
-       if (ia_valid & ATTR_MTIME) {
-               inode->i_mtime = timestamp_truncate(attr->ia_mtime,
-                                                 inode);
-       }
-       if (ia_valid & ATTR_CTIME) {
-               inode->i_ctime = timestamp_truncate(attr->ia_ctime,
-                                                 inode);
-       }
+       if (ia_valid & ATTR_ATIME)
+               inode->i_atime = attr->ia_atime;
+       if (ia_valid & ATTR_MTIME)
+               inode->i_mtime = attr->ia_mtime;
+       if (ia_valid & ATTR_CTIME)
+               inode->i_ctime = attr->ia_ctime;
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;
 
index 3314a0f..9d02cdc 100644 (file)
@@ -875,7 +875,7 @@ static int truncate_dnode(struct dnode_of_data *dn)
 
        /* get direct node */
        page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
-       if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
+       if (PTR_ERR(page) == -ENOENT)
                return 1;
        else if (IS_ERR(page))
                return PTR_ERR(page);
index 1e08bd5..f1b2a1f 100644 (file)
@@ -271,6 +271,14 @@ static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
 {
        return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
 }
+
+static inline struct timespec64 fat_timespec64_trunc_10ms(struct timespec64 ts)
+{
+       if (ts.tv_nsec)
+               ts.tv_nsec -= ts.tv_nsec % 10000000UL;
+       return ts;
+}
+
 /*
  * truncate the various times with appropriate granularity:
  *   root inode:
@@ -308,7 +316,7 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
        }
        if (flags & S_CTIME) {
                if (sbi->options.isvfat)
-                       inode->i_ctime = timespec64_trunc(*now, 10000000);
+                       inode->i_ctime = fat_timespec64_trunc_10ms(*now);
                else
                        inode->i_ctime = fat_timespec64_trunc_2secs(*now);
        }
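
fat_timespec64_trunc_10ms() replaces the former timespec64_trunc(*now, 10000000)
call: for VFAT the ctime granularity is 10ms, so the nanosecond part is rounded
down to a multiple of 10,000,000 ns. A worked user-space example of the
rounding (local struct, not the kernel's timespec64):

#include <stdio.h>

struct ts { long long tv_sec; long tv_nsec; };

/* round tv_nsec down to a multiple of 10ms, as fat_timespec64_trunc_10ms() does */
static struct ts trunc_10ms(struct ts t)
{
	if (t.tv_nsec)
		t.tv_nsec -= t.tv_nsec % 10000000L;
	return t;
}

int main(void)
{
	struct ts t = { 1580000000, 123456789 };	/* 123.456789 ms past the second */

	t = trunc_10ms(t);
	printf("%lld.%09ld\n", t.tv_sec, t.tv_nsec);	/* prints 1580000000.120000000 */
	return 0;
}
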
index 9616af3..08e91ef 100644 (file)
@@ -111,7 +111,7 @@ extern void fscache_enqueue_object(struct fscache_object *);
  * object-list.c
  */
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
-extern const struct file_operations fscache_objlist_fops;
+extern const struct proc_ops fscache_objlist_proc_ops;
 
 extern void fscache_objlist_add(struct fscache_object *);
 extern void fscache_objlist_remove(struct fscache_object *);
index 72ebfe5..e106a1a 100644 (file)
@@ -7,6 +7,7 @@
 
 #define FSCACHE_DEBUG_LEVEL COOKIE
 #include <linux/module.h>
+#include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/key.h>
@@ -405,9 +406,9 @@ static int fscache_objlist_release(struct inode *inode, struct file *file)
        return seq_release(inode, file);
 }
 
-const struct file_operations fscache_objlist_fops = {
-       .open           = fscache_objlist_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = fscache_objlist_release,
+const struct proc_ops fscache_objlist_proc_ops = {
+       .proc_open      = fscache_objlist_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = fscache_objlist_release,
 };
index 5523446..90a7bc2 100644 (file)
@@ -35,7 +35,7 @@ int __init fscache_proc_init(void)
 
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
        if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
-                        &fscache_objlist_fops))
+                        &fscache_objlist_proc_ops))
                goto error_objects;
 #endif
 
index ea15c6d..c7418b0 100644 (file)
@@ -1683,12 +1683,9 @@ EXPORT_SYMBOL(generic_update_time);
  */
 static int update_time(struct inode *inode, struct timespec64 *time, int flags)
 {
-       int (*update_time)(struct inode *, struct timespec64 *, int);
-
-       update_time = inode->i_op->update_time ? inode->i_op->update_time :
-               generic_update_time;
-
-       return update_time(inode, time, flags);
+       if (inode->i_op->update_time)
+               return inode->i_op->update_time(inode, time, flags);
+       return generic_update_time(inode, time, flags);
 }
 
 /**
@@ -2153,30 +2150,6 @@ void inode_nohighmem(struct inode *inode)
 }
 EXPORT_SYMBOL(inode_nohighmem);
 
-/**
- * timespec64_trunc - Truncate timespec64 to a granularity
- * @t: Timespec64
- * @gran: Granularity in ns.
- *
- * Truncate a timespec64 to a granularity. Always rounds down. gran must
- * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
- */
-struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran)
-{
-       /* Avoid division in the common cases 1 ns and 1 s. */
-       if (gran == 1) {
-               /* nothing */
-       } else if (gran == NSEC_PER_SEC) {
-               t.tv_nsec = 0;
-       } else if (gran > 1 && gran < NSEC_PER_SEC) {
-               t.tv_nsec -= t.tv_nsec % gran;
-       } else {
-               WARN(1, "illegal file time granularity: %u", gran);
-       }
-       return t;
-}
-EXPORT_SYMBOL(timespec64_trunc);
-
 /**
  * timestamp_truncate - Truncate timespec to a granularity
  * @t: Timespec
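
timespec64_trunc() is deleted above because callers now go through timestamp_truncate(), which reads the granularity from the inode's superblock (s_time_gran) instead of taking it as an argument. A userspace sketch of the rounding rule both helpers share; names and the sample granularities are illustrative:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts64 {
	long long tv_sec;
	long tv_nsec;
};

/* Round a timestamp down to "gran" nanoseconds (1 <= gran <= NSEC_PER_SEC). */
static struct ts64 trunc_gran(struct ts64 t, long gran)
{
	if (gran == 1)
		return t;		/* ns resolution: nothing to do */
	if (gran == NSEC_PER_SEC)
		t.tv_nsec = 0;		/* whole seconds */
	else
		t.tv_nsec -= t.tv_nsec % gran;
	return t;
}

int main(void)
{
	struct ts64 t = { 0, 999999999 };

	printf("%ld\n", trunc_gran(t, 1000).tv_nsec);		/* 999999000 */
	printf("%ld\n", trunc_gran(t, NSEC_PER_SEC).tv_nsec);	/* 0 */
	return 0;
}
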
index 1806afd..77f22c3 100644 (file)
@@ -585,8 +585,7 @@ struct io_submit_state {
         * io_kiocb alloc cache
         */
        void                    *reqs[IO_IOPOLL_BATCH];
-       unsigned                int free_reqs;
-       unsigned                int cur_req;
+       unsigned int            free_reqs;
 
        /*
         * File reference cache
@@ -754,6 +753,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_files_update *ip,
                                 unsigned nr_args);
 static int io_grab_files(struct io_kiocb *req);
+static void io_ring_file_ref_flush(struct fixed_file_data *data);
 
 static struct kmem_cache *req_cachep;
 
@@ -1020,21 +1020,28 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 
 static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
 {
+       if (!ctx->cq_ev_fd)
+               return false;
        if (!ctx->eventfd_async)
                return true;
        return io_wq_current_is_worker() || in_interrupt();
 }
 
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static void __io_cqring_ev_posted(struct io_ring_ctx *ctx, bool trigger_ev)
 {
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
        if (waitqueue_active(&ctx->sqo_wait))
                wake_up(&ctx->sqo_wait);
-       if (ctx->cq_ev_fd && io_should_trigger_evfd(ctx))
+       if (trigger_ev)
                eventfd_signal(ctx->cq_ev_fd, 1);
 }
 
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+       __io_cqring_ev_posted(ctx, io_should_trigger_evfd(ctx));
+}
+
 /* Returns true if there are no backlogged entries after the flush */
 static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
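
The io_should_trigger_evfd()/__io_cqring_ev_posted() split above controls when the registered eventfd is signalled for new completions. For reference, the userspace side of such an eventfd just reads an accumulating 64-bit counter; a small standalone sketch (plain eventfd only, no io_uring registration shown):

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	uint64_t one = 1, val = 0;

	/* Each kernel-side eventfd_signal(ctx->cq_ev_fd, 1) behaves like
	 * one of these writes: it adds 1 to the counter. */
	write(efd, &one, sizeof(one));
	write(efd, &one, sizeof(one));

	/* A single read drains the counter, so val is 2 here. */
	read(efd, &val, sizeof(val));
	printf("%llu\n", (unsigned long long)val);

	close(efd);
	return 0;
}
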
@@ -1183,12 +1190,10 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                        ret = 1;
                }
                state->free_reqs = ret - 1;
-               state->cur_req = 1;
-               req = state->reqs[0];
+               req = state->reqs[ret - 1];
        } else {
-               req = state->reqs[state->cur_req];
                state->free_reqs--;
-               state->cur_req++;
+               req = state->reqs[state->free_reqs];
        }
 
 got_it:
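
The io_get_req() change above drops cur_req and treats reqs[] as a stack indexed only by free_reqs: a bulk allocation fills the array, and each request is popped from the top. A userspace sketch of that indexing with illustrative names:

#include <stdio.h>

#define BATCH 8

struct state {
	void *reqs[BATCH];
	unsigned int free_reqs;		/* doubles as the stack pointer */
};

static void *get_req(struct state *s)
{
	if (!s->free_reqs)
		return NULL;		/* real code would bulk-allocate here */
	s->free_reqs--;
	return s->reqs[s->free_reqs];	/* pop from the top */
}

int main(void)
{
	static int slots[BATCH];
	struct state s = { .free_reqs = BATCH };
	int i;

	for (i = 0; i < BATCH; i++)
		s.reqs[i] = &slots[i];

	printf("%p %p\n", get_req(&s), get_req(&s));	/* slots[7], then slots[6] */
	return 0;
}
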
@@ -1855,9 +1860,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        unsigned ioprio;
        int ret;
 
-       if (!req->file)
-               return -EBADF;
-
        if (S_ISREG(file_inode(req->file)->i_mode))
                req->flags |= REQ_F_ISREG;
 
@@ -1866,8 +1868,11 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                req->flags |= REQ_F_CUR_POS;
                kiocb->ki_pos = req->file->f_pos;
        }
-       kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
        kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
+       kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
+       ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
+       if (unlikely(ret))
+               return ret;
 
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
@@ -1879,10 +1884,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        } else
                kiocb->ki_ioprio = get_current_ioprio();
 
-       ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
-       if (unlikely(ret))
-               return ret;
-
        /* don't allow async punt if RWF_NOWAIT was requested */
        if ((kiocb->ki_flags & IOCB_NOWAIT) ||
            (req->file->f_flags & O_NONBLOCK))
@@ -2164,10 +2165,12 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
 {
        if (!io_op_defs[req->opcode].async_ctx)
                return 0;
-       if (!req->io && io_alloc_async_ctx(req))
-               return -ENOMEM;
+       if (!req->io) {
+               if (io_alloc_async_ctx(req))
+                       return -ENOMEM;
 
-       io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+               io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+       }
        req->work.func = io_rw_async;
        return 0;
 }
@@ -2724,9 +2727,16 @@ static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
        struct io_fadvise *fa = &req->fadvise;
        int ret;
 
-       /* DONTNEED may block, others _should_ not */
-       if (fa->advice == POSIX_FADV_DONTNEED && force_nonblock)
-               return -EAGAIN;
+       if (force_nonblock) {
+               switch (fa->advice) {
+               case POSIX_FADV_NORMAL:
+               case POSIX_FADV_RANDOM:
+               case POSIX_FADV_SEQUENTIAL:
+                       break;
+               default:
+                       return -EAGAIN;
+               }
+       }
 
        ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
        if (ret < 0)
@@ -2837,16 +2847,13 @@ static void io_close_finish(struct io_wq_work **workptr)
                int ret;
 
                ret = filp_close(req->close.put_file, req->work.files);
-               if (ret < 0) {
+               if (ret < 0)
                        req_set_fail_links(req);
-               }
                io_cqring_add_event(req, ret);
        }
 
        fput(req->close.put_file);
 
-       /* we bypassed the re-issue, drop the submission reference */
-       io_put_req(req);
        io_put_req_find_next(req, &nxt);
        if (nxt)
                io_wq_assign_next(workptr, nxt);
@@ -2888,7 +2895,13 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
 
 eagain:
        req->work.func = io_close_finish;
-       return -EAGAIN;
+       /*
+        * Do manual async queue here to avoid grabbing files - we don't
+        * need the files, and it'll cause io_close_finish() to close
+        * the file again and cause a double CQE entry for this request
+        */
+       io_queue_async_work(req);
+       return 0;
 }
 
 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -3083,7 +3096,8 @@ static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
                else if (force_nonblock)
                        flags |= MSG_DONTWAIT;
 
-               ret = __sys_sendmsg_sock(sock, &msg, flags);
+               msg.msg_flags = flags;
+               ret = sock_sendmsg(sock, &msg);
                if (force_nonblock && ret == -EAGAIN)
                        return -EAGAIN;
                if (ret == -ERESTARTSYS)
@@ -3109,6 +3123,7 @@ static int io_recvmsg_prep(struct io_kiocb *req,
 
        sr->msg_flags = READ_ONCE(sqe->msg_flags);
        sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       sr->len = READ_ONCE(sqe->len);
 
        if (!io || req->opcode == IORING_OP_RECV)
                return 0;
@@ -3227,7 +3242,7 @@ static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
                else if (force_nonblock)
                        flags |= MSG_DONTWAIT;
 
-               ret = __sys_recvmsg_sock(sock, &msg, NULL, NULL, flags);
+               ret = sock_recvmsg(sock, &msg, flags);
                if (force_nonblock && ret == -EAGAIN)
                        return -EAGAIN;
                if (ret == -ERESTARTSYS)
@@ -3561,6 +3576,14 @@ static void io_poll_flush(struct io_wq_work **workptr)
                __io_poll_flush(req->ctx, nodes);
 }
 
+static void io_poll_trigger_evfd(struct io_wq_work **workptr)
+{
+       struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+
+       eventfd_signal(req->ctx->cq_ev_fd, 1);
+       io_put_req(req);
+}
+
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
 {
@@ -3586,14 +3609,22 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 
                if (llist_empty(&ctx->poll_llist) &&
                    spin_trylock_irqsave(&ctx->completion_lock, flags)) {
+                       bool trigger_ev;
+
                        hash_del(&req->hash_node);
                        io_poll_complete(req, mask, 0);
-                       req->flags |= REQ_F_COMP_LOCKED;
-                       io_put_req(req);
-                       spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-                       io_cqring_ev_posted(ctx);
-                       req = NULL;
+                       trigger_ev = io_should_trigger_evfd(ctx);
+                       if (trigger_ev && eventfd_signal_count()) {
+                               trigger_ev = false;
+                               req->work.func = io_poll_trigger_evfd;
+                       } else {
+                               req->flags |= REQ_F_COMP_LOCKED;
+                               io_put_req(req);
+                               req = NULL;
+                       }
+                       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+                       __io_cqring_ev_posted(ctx, trigger_ev);
                } else {
                        req->result = mask;
                        req->llist_node.next = NULL;
@@ -4815,8 +4846,7 @@ static void io_submit_state_end(struct io_submit_state *state)
        blk_finish_plug(&state->plug);
        io_file_put(state);
        if (state->free_reqs)
-               kmem_cache_free_bulk(req_cachep, state->free_reqs,
-                                       &state->reqs[state->cur_req]);
+               kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
 }
 
 /*
@@ -5041,7 +5071,8 @@ static int io_sq_thread(void *data)
                         * reap events and wake us up.
                         */
                        if (inflight ||
-                           (!time_after(jiffies, timeout) && ret != -EBUSY)) {
+                           (!time_after(jiffies, timeout) && ret != -EBUSY &&
+                           !percpu_ref_is_dying(&ctx->refs))) {
                                cond_resched();
                                continue;
                        }
@@ -5231,15 +5262,10 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
        if (!data)
                return -ENXIO;
 
-       /* protect against inflight atomic switch, which drops the ref */
-       percpu_ref_get(&data->refs);
-       /* wait for existing switches */
-       flush_work(&data->ref_work);
        percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
-       wait_for_completion(&data->done);
-       percpu_ref_put(&data->refs);
-       /* flush potential new switch */
        flush_work(&data->ref_work);
+       wait_for_completion(&data->done);
+       io_ring_file_ref_flush(data);
        percpu_ref_exit(&data->refs);
 
        __io_sqe_files_unregister(ctx);
@@ -5477,14 +5503,11 @@ struct io_file_put {
        struct completion *done;
 };
 
-static void io_ring_file_ref_switch(struct work_struct *work)
+static void io_ring_file_ref_flush(struct fixed_file_data *data)
 {
        struct io_file_put *pfile, *tmp;
-       struct fixed_file_data *data;
        struct llist_node *node;
 
-       data = container_of(work, struct fixed_file_data, ref_work);
-
        while ((node = llist_del_all(&data->put_llist)) != NULL) {
                llist_for_each_entry_safe(pfile, tmp, node, llist) {
                        io_ring_file_put(data->ctx, pfile->file);
@@ -5494,7 +5517,14 @@ static void io_ring_file_ref_switch(struct work_struct *work)
                                kfree(pfile);
                }
        }
+}
 
+static void io_ring_file_ref_switch(struct work_struct *work)
+{
+       struct fixed_file_data *data;
+
+       data = container_of(work, struct fixed_file_data, ref_work);
+       io_ring_file_ref_flush(data);
        percpu_ref_get(&data->refs);
        percpu_ref_switch_to_percpu(&data->refs);
 }
@@ -5505,8 +5535,14 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
 
        data = container_of(ref, struct fixed_file_data, refs);
 
-       /* we can't safely switch from inside this context, punt to wq */
-       queue_work(system_wq, &data->ref_work);
+       /*
+        * We can't safely switch from inside this context, punt to wq. If
+        * the table ref is going away, the table is being unregistered.
+        * Don't queue up the async work for that case, the caller will
+        * handle it.
+        */
+       if (!percpu_ref_is_dying(&data->refs))
+               queue_work(system_wq, &data->ref_work);
 }
 
 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
@@ -6295,6 +6331,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
        percpu_ref_kill(&ctx->refs);
        mutex_unlock(&ctx->uring_lock);
 
+       /*
+        * Wait for sq thread to idle, if we have one. It won't spin on new
+        * work after we've killed the ctx ref above. This is important to do
+        * before we cancel existing commands, as the thread could otherwise
+        * be queueing new work post that. If that's work we need to cancel,
+        * it could cause shutdown to hang.
+        */
+       while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
+               cpu_relax();
+
        io_kill_timeouts(ctx);
        io_poll_remove_all(ctx);
 
@@ -6501,6 +6547,80 @@ out_fput:
        return submitted ? submitted : ret;
 }
 
+static int io_uring_show_cred(int id, void *p, void *data)
+{
+       const struct cred *cred = p;
+       struct seq_file *m = data;
+       struct user_namespace *uns = seq_user_ns(m);
+       struct group_info *gi;
+       kernel_cap_t cap;
+       unsigned __capi;
+       int g;
+
+       seq_printf(m, "%5d\n", id);
+       seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
+       seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
+       seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
+       seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
+       seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
+       seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
+       seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
+       seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
+       seq_puts(m, "\n\tGroups:\t");
+       gi = cred->group_info;
+       for (g = 0; g < gi->ngroups; g++) {
+               seq_put_decimal_ull(m, g ? " " : "",
+                                       from_kgid_munged(uns, gi->gid[g]));
+       }
+       seq_puts(m, "\n\tCapEff:\t");
+       cap = cred->cap_effective;
+       CAP_FOR_EACH_U32(__capi)
+               seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
+       seq_putc(m, '\n');
+       return 0;
+}
+
+static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
+{
+       int i;
+
+       mutex_lock(&ctx->uring_lock);
+       seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
+       for (i = 0; i < ctx->nr_user_files; i++) {
+               struct fixed_file_table *table;
+               struct file *f;
+
+               table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
+               f = table->files[i & IORING_FILE_TABLE_MASK];
+               if (f)
+                       seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
+               else
+                       seq_printf(m, "%5u: <none>\n", i);
+       }
+       seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
+       for (i = 0; i < ctx->nr_user_bufs; i++) {
+               struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
+
+               seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
+                                               (unsigned int) buf->len);
+       }
+       if (!idr_is_empty(&ctx->personality_idr)) {
+               seq_printf(m, "Personalities:\n");
+               idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
+       }
+       mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
+{
+       struct io_ring_ctx *ctx = f->private_data;
+
+       if (percpu_ref_tryget(&ctx->refs)) {
+               __io_uring_show_fdinfo(ctx, m);
+               percpu_ref_put(&ctx->refs);
+       }
+}
+
 static const struct file_operations io_uring_fops = {
        .release        = io_uring_release,
        .flush          = io_uring_flush,
@@ -6511,6 +6631,7 @@ static const struct file_operations io_uring_fops = {
 #endif
        .poll           = io_uring_poll,
        .fasync         = io_uring_fasync,
+       .show_fdinfo    = io_uring_show_fdinfo,
 };
 
 static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
@@ -6963,6 +7084,39 @@ out_fput:
 
 static int __init io_uring_init(void)
 {
+#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
+       BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
+       BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
+} while (0)
+
+#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
+       __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
+       BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
+       BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
+       BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
+       BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
+       BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
+       BUILD_BUG_SQE_ELEM(8,  __u64,  off);
+       BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
+       BUILD_BUG_SQE_ELEM(16, __u64,  addr);
+       BUILD_BUG_SQE_ELEM(24, __u32,  len);
+       BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
+       BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
+       BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
+       BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
+       BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
+       BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
+       BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
+       BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
+       BUILD_BUG_SQE_ELEM(42, __u16,  personality);
+
        BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
        req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
        return 0;
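
The BUILD_BUG_SQE_ELEM() table above pins the userspace-visible SQE layout at compile time, so any accidental reordering of struct io_uring_sqe breaks the build. The same trick in plain C11 with static_assert, using a made-up struct rather than the real SQE:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Made-up stand-in for a UAPI struct; not the real io_uring_sqe. */
struct demo_sqe {
	uint8_t  opcode;	/* offset 0 */
	uint8_t  flags;		/* offset 1 */
	uint16_t ioprio;	/* offset 2 */
	int32_t  fd;		/* offset 4 */
	uint64_t off;		/* offset 8 */
};

#define VERIFY_ELEM(type, member, want_off, want_size)			\
	static_assert(offsetof(type, member) == (want_off) &&		\
		      sizeof(((type *)0)->member) == (want_size),	\
		      #member " moved or changed size")

VERIFY_ELEM(struct demo_sqe, opcode, 0, 1);
VERIFY_ELEM(struct demo_sqe, ioprio, 2, 2);
VERIFY_ELEM(struct demo_sqe, fd,     4, 4);
VERIFY_ELEM(struct demo_sqe, off,    8, 8);

int main(void)
{
	return 0;
}
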
index 60bf8ff..eb8ca44 100644 (file)
@@ -1074,12 +1074,11 @@ static int jbd2_seq_info_release(struct inode *inode, struct file *file)
        return seq_release(inode, file);
 }
 
-static const struct file_operations jbd2_seq_info_fops = {
-       .owner          = THIS_MODULE,
-       .open           = jbd2_seq_info_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = jbd2_seq_info_release,
+static const struct proc_ops jbd2_info_proc_ops = {
+       .proc_open      = jbd2_seq_info_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = jbd2_seq_info_release,
 };
 
 static struct proc_dir_entry *proc_jbd2_stats;
@@ -1089,7 +1088,7 @@ static void jbd2_stats_proc_init(journal_t *journal)
        journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats);
        if (journal->j_proc_entry) {
                proc_create_data("info", S_IRUGO, journal->j_proc_entry,
-                                &jbd2_seq_info_fops, journal);
+                                &jbd2_info_proc_ops, journal);
        }
 }
 
index 888cdd6..44b62b3 100644 (file)
@@ -43,12 +43,12 @@ static ssize_t jfs_loglevel_proc_write(struct file *file,
        return count;
 }
 
-static const struct file_operations jfs_loglevel_proc_fops = {
-       .open           = jfs_loglevel_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = jfs_loglevel_proc_write,
+static const struct proc_ops jfs_loglevel_proc_ops = {
+       .proc_open      = jfs_loglevel_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = jfs_loglevel_proc_write,
 };
 #endif
 
@@ -68,7 +68,7 @@ void jfs_proc_init(void)
 #endif
 #ifdef CONFIG_JFS_DEBUG
        proc_create_single("TxAnchor", 0, base, jfs_txanchor_proc_show);
-       proc_create("loglevel", 0, base, &jfs_loglevel_proc_fops);
+       proc_create("loglevel", 0, base, &jfs_loglevel_proc_ops);
 #endif
 }
 
index caade18..7dfcab2 100644 (file)
@@ -4027,7 +4027,6 @@ static int dbGetL2AGSize(s64 nblocks)
  */
 #define MAXL0PAGES     (1 + LPERCTL)
 #define MAXL1PAGES     (1 + LPERCTL * MAXL0PAGES)
-#define MAXL2PAGES     (1 + LPERCTL * MAXL1PAGES)
 
 /*
  * convert number of map pages to the zero origin top dmapctl level
index eac277c..d0f7a5a 100644 (file)
@@ -160,9 +160,9 @@ static inline void set_inode_attr(struct inode *inode,
 {
        inode->i_uid = attrs->ia_uid;
        inode->i_gid = attrs->ia_gid;
-       inode->i_atime = timestamp_truncate(attrs->ia_atime, inode);
-       inode->i_mtime = timestamp_truncate(attrs->ia_mtime, inode);
-       inode->i_ctime = timestamp_truncate(attrs->ia_ctime, inode);
+       inode->i_atime = attrs->ia_atime;
+       inode->i_mtime = attrs->ia_mtime;
+       inode->i_ctime = attrs->ia_ctime;
 }
 
 static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
index 1463b03..c686bd9 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/buffer_head.h> /* sync_mapping_buffers */
 #include <linux/fs_context.h>
 #include <linux/pseudo_fs.h>
+#include <linux/fsnotify.h>
 
 #include <linux/uaccess.h>
 
@@ -239,6 +240,75 @@ const struct inode_operations simple_dir_inode_operations = {
 };
 EXPORT_SYMBOL(simple_dir_inode_operations);
 
+static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
+{
+       struct dentry *child = NULL;
+       struct list_head *p = prev ? &prev->d_child : &parent->d_subdirs;
+
+       spin_lock(&parent->d_lock);
+       while ((p = p->next) != &parent->d_subdirs) {
+               struct dentry *d = container_of(p, struct dentry, d_child);
+               if (simple_positive(d)) {
+                       spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+                       if (simple_positive(d))
+                               child = dget_dlock(d);
+                       spin_unlock(&d->d_lock);
+                       if (likely(child))
+                               break;
+               }
+       }
+       spin_unlock(&parent->d_lock);
+       dput(prev);
+       return child;
+}
+
+void simple_recursive_removal(struct dentry *dentry,
+                              void (*callback)(struct dentry *))
+{
+       struct dentry *this = dget(dentry);
+       while (true) {
+               struct dentry *victim = NULL, *child;
+               struct inode *inode = this->d_inode;
+
+               inode_lock(inode);
+               if (d_is_dir(this))
+                       inode->i_flags |= S_DEAD;
+               while ((child = find_next_child(this, victim)) == NULL) {
+                       // kill and ascend
+                       // update metadata while it's still locked
+                       inode->i_ctime = current_time(inode);
+                       clear_nlink(inode);
+                       inode_unlock(inode);
+                       victim = this;
+                       this = this->d_parent;
+                       inode = this->d_inode;
+                       inode_lock(inode);
+                       if (simple_positive(victim)) {
+                               d_invalidate(victim);   // avoid lost mounts
+                               if (d_is_dir(victim))
+                                       fsnotify_rmdir(inode, victim);
+                               else
+                                       fsnotify_unlink(inode, victim);
+                               if (callback)
+                                       callback(victim);
+                               dput(victim);           // unpin it
+                       }
+                       if (victim == dentry) {
+                               inode->i_ctime = inode->i_mtime =
+                                       current_time(inode);
+                               if (d_is_dir(dentry))
+                                       drop_nlink(inode);
+                               inode_unlock(inode);
+                               dput(dentry);
+                               return;
+                       }
+               }
+               inode_unlock(inode);
+               this = child;
+       }
+}
+EXPORT_SYMBOL(simple_recursive_removal);
+
 static const struct super_operations simple_super_operations = {
        .statfs         = simple_statfs,
 };
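
simple_recursive_removal(), added above, walks a dentry subtree bottom-up, unhashing and fsnotifying each positive child before dropping the starting dentry itself. A hedged sketch of a caller, assuming a pseudo filesystem that stashes per-dentry data in d_fsdata; the names are illustrative, not an existing in-tree user:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/slab.h>

/* Optional per-dentry cleanup, run while the victim is still positive. */
static void example_release_private(struct dentry *dentry)
{
	kfree(dentry->d_fsdata);
	dentry->d_fsdata = NULL;
}

/* Remove "dentry" and everything below it; pass NULL if no callback needed. */
static void example_remove_subtree(struct dentry *dentry)
{
	simple_recursive_removal(dentry, example_release_private);
}
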
index ca9228a..a01f08c 100644 (file)
@@ -60,11 +60,11 @@ nlm_end_grace_read(struct file *file, char __user *buf, size_t size,
        return simple_read_from_buffer(buf, size, pos, resp, sizeof(resp));
 }
 
-static const struct file_operations lockd_end_grace_operations = {
-       .write          = nlm_end_grace_write,
-       .read           = nlm_end_grace_read,
-       .llseek         = default_llseek,
-       .release        = simple_transaction_release,
+static const struct proc_ops lockd_end_grace_proc_ops = {
+       .proc_write     = nlm_end_grace_write,
+       .proc_read      = nlm_end_grace_read,
+       .proc_lseek     = default_llseek,
+       .proc_release   = simple_transaction_release,
 };
 
 int __init
@@ -76,7 +76,7 @@ lockd_create_procfs(void)
        if (!entry)
                return -ENOMEM;
        entry = proc_create("nlm_end_grace", S_IRUGO|S_IWUSR, entry,
-                                &lockd_end_grace_operations);
+                           &lockd_end_grace_proc_ops);
        if (!entry) {
                remove_proc_entry("fs/lockd", NULL);
                return -ENOMEM;
index 11b42c5..7eb919f 100644 (file)
@@ -157,11 +157,11 @@ static int exports_proc_open(struct inode *inode, struct file *file)
        return exports_net_open(current->nsproxy->net_ns, file);
 }
 
-static const struct file_operations exports_proc_operations = {
-       .open           = exports_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops exports_proc_ops = {
+       .proc_open      = exports_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 static int exports_nfsd_open(struct inode *inode, struct file *file)
@@ -1431,8 +1431,7 @@ static int create_proc_exports_entry(void)
        entry = proc_mkdir("fs/nfs", NULL);
        if (!entry)
                return -ENOMEM;
-       entry = proc_create("exports", 0, entry,
-                                &exports_proc_operations);
+       entry = proc_create("exports", 0, entry, &exports_proc_ops);
        if (!entry) {
                remove_proc_entry("fs/nfs", NULL);
                return -ENOMEM;
index 9bce3b9..b1bc582 100644 (file)
@@ -84,17 +84,17 @@ static int nfsd_proc_open(struct inode *inode, struct file *file)
        return single_open(file, nfsd_proc_show, NULL);
 }
 
-static const struct file_operations nfsd_proc_fops = {
-       .open = nfsd_proc_open,
-       .read  = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
+static const struct proc_ops nfsd_proc_ops = {
+       .proc_open      = nfsd_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 void
 nfsd_stat_init(void)
 {
-       svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_fops);
+       svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
 }
 
 void
index 6c73884..d4359a1 100644 (file)
@@ -2899,18 +2899,12 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
                        ia_valid |= ATTR_MTIME | ATTR_CTIME;
                }
        }
-       if (ia_valid & ATTR_ATIME) {
-               vi->i_atime = timestamp_truncate(attr->ia_atime,
-                                              vi);
-       }
-       if (ia_valid & ATTR_MTIME) {
-               vi->i_mtime = timestamp_truncate(attr->ia_mtime,
-                                              vi);
-       }
-       if (ia_valid & ATTR_CTIME) {
-               vi->i_ctime = timestamp_truncate(attr->ia_ctime,
-                                              vi);
-       }
+       if (ia_valid & ATTR_ATIME)
+               vi->i_atime = attr->ia_atime;
+       if (ia_valid & ATTR_MTIME)
+               vi->i_mtime = attr->ia_mtime;
+       if (ia_valid & ATTR_CTIME)
+               vi->i_ctime = attr->ia_ctime;
        mark_inode_dirty(vi);
 out:
        return err;
index 9876db5..6cd5e49 100644 (file)
@@ -2101,17 +2101,15 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
 static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
                                            struct buffer_head **di_bh,
                                            int meta_level,
-                                           int overwrite_io,
                                            int write_sem,
                                            int wait)
 {
        int ret = 0;
 
        if (wait)
-               ret = ocfs2_inode_lock(inode, NULL, meta_level);
+               ret = ocfs2_inode_lock(inode, di_bh, meta_level);
        else
-               ret = ocfs2_try_inode_lock(inode,
-                       overwrite_io ? NULL : di_bh, meta_level);
+               ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
        if (ret < 0)
                goto out;
 
@@ -2136,6 +2134,7 @@ static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
 
 out_unlock:
        brelse(*di_bh);
+       *di_bh = NULL;
        ocfs2_inode_unlock(inode, meta_level);
 out:
        return ret;
@@ -2177,7 +2176,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                ret = ocfs2_inode_lock_for_extent_tree(inode,
                                                       &di_bh,
                                                       meta_level,
-                                                      overwrite_io,
                                                       write_sem,
                                                       wait);
                if (ret < 0) {
@@ -2233,13 +2231,13 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                                                           &di_bh,
                                                           meta_level,
                                                           write_sem);
+                       meta_level = 1;
+                       write_sem = 1;
                        ret = ocfs2_inode_lock_for_extent_tree(inode,
                                                               &di_bh,
                                                               meta_level,
-                                                              overwrite_io,
-                                                              1,
+                                                              write_sem,
                                                               wait);
-                       write_sem = 1;
                        if (ret < 0) {
                                if (ret != -EAGAIN)
                                        mlog_errno(ret);
index 4180c3e..939df99 100644 (file)
@@ -696,7 +696,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
 
        bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
                                               ac, cl);
-       if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
+       if (PTR_ERR(bg_bh) == -ENOSPC)
                bg_bh = ocfs2_block_group_alloc_discontig(handle,
                                                          alloc_inode,
                                                          ac, cl);
index 6220642..9fc47c2 100644 (file)
@@ -24,7 +24,7 @@
 
 static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
 {
-       pr_warn("overlayfs: \"check_copy_up\" module option is obsolete\n");
+       pr_warn("\"check_copy_up\" module option is obsolete\n");
        return 0;
 }
 
@@ -123,6 +123,9 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
        loff_t old_pos = 0;
        loff_t new_pos = 0;
        loff_t cloned;
+       loff_t data_pos = -1;
+       loff_t hole_len;
+       bool skip_hole = false;
        int error = 0;
 
        if (len == 0)
@@ -144,7 +147,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
                goto out;
        /* Couldn't clone, so now we try to copy the data */
 
-       /* FIXME: copy up sparse files efficiently */
+       /* Check if lower fs supports seek operation */
+       if (old_file->f_mode & FMODE_LSEEK &&
+           old_file->f_op->llseek)
+               skip_hole = true;
+
        while (len) {
                size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
                long bytes;
@@ -157,6 +164,36 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
                        break;
                }
 
+               /*
+                * Filling holes with zeroes wastes disk space and slows
+                * down the copy-up, so holes are optimized away during
+                * copy-up. This relies on the lower fs implementing
+                * SEEK_DATA; if the lower fs does not support it,
+                * copy-up behaves as before.
+                *
+                * Hole detection works as follows: when the next data
+                * position is found to be beyond the current position,
+                * the hole in between is skipped; otherwise data is
+                * copied in chunks of OVL_COPY_UP_CHUNK_SIZE. This may
+                * not recognize every kind of hole and sometimes skips
+                * only part of a hole, but it is good enough for most
+                * use cases.
+                */
+
+               if (skip_hole && data_pos < old_pos) {
+                       data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
+                       if (data_pos > old_pos) {
+                               hole_len = data_pos - old_pos;
+                               len -= hole_len;
+                               old_pos = new_pos = data_pos;
+                               continue;
+                       } else if (data_pos == -ENXIO) {
+                               break;
+                       } else if (data_pos < 0) {
+                               skip_hole = false;
+                       }
+               }
+
                bytes = do_splice_direct(old_file, &old_pos,
                                         new_file, &new_pos,
                                         this_len, SPLICE_F_MOVE);
@@ -480,7 +517,7 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
        }
 
        inode_lock(temp->d_inode);
-       if (c->metacopy)
+       if (S_ISREG(c->stat.mode))
                err = ovl_set_size(temp, &c->stat);
        if (!err)
                err = ovl_set_attr(temp, &c->stat);
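
The copy-up loop earlier in this file now probes the lower file with SEEK_DATA and jumps over holes instead of copying zeroes. A function-only userspace sketch of the same idea, assuming a source filesystem that supports SEEK_DATA (error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Copy "len" bytes from fd "in" to fd "out", skipping holes in the source. */
static void copy_skipping_holes(int in, int out, off_t len)
{
	char buf[4096];
	off_t pos = 0;

	while (pos < len) {
		off_t data = lseek(in, pos, SEEK_DATA);
		ssize_t n;

		if (data < 0 || data >= len)
			break;		/* ENXIO: only a trailing hole is left */
		pos = data;		/* jump straight over the hole */

		n = pread(in, buf, sizeof(buf), pos);
		if (n <= 0)
			break;
		if (pwrite(out, buf, n, pos) != n)
			break;
		pos += n;
	}
}
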
index 29abdb1..8e57d53 100644 (file)
@@ -35,7 +35,7 @@ int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        dput(wdentry);
 
        if (err) {
-               pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n",
+               pr_err("cleanup of '%pd2' failed (%i)\n",
                       wdentry, err);
        }
 
@@ -53,7 +53,7 @@ static struct dentry *ovl_lookup_temp(struct dentry *workdir)
 
        temp = lookup_one_len(name, workdir, strlen(name));
        if (!IS_ERR(temp) && temp->d_inode) {
-               pr_err("overlayfs: workdir/%s already exists\n", name);
+               pr_err("workdir/%s already exists\n", name);
                dput(temp);
                temp = ERR_PTR(-EIO);
        }
@@ -134,7 +134,7 @@ static int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry,
        d = lookup_one_len(dentry->d_name.name, dentry->d_parent,
                           dentry->d_name.len);
        if (IS_ERR(d)) {
-               pr_warn("overlayfs: failed lookup after mkdir (%pd2, err=%i).\n",
+               pr_warn("failed lookup after mkdir (%pd2, err=%i).\n",
                        dentry, err);
                return PTR_ERR(d);
        }
@@ -267,7 +267,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
 
        d_instantiate(dentry, inode);
        if (inode != oip.newinode) {
-               pr_warn_ratelimited("overlayfs: newly created inode found in cache (%pd2)\n",
+               pr_warn_ratelimited("newly created inode found in cache (%pd2)\n",
                                    dentry);
        }
 
@@ -1009,7 +1009,7 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
                spin_unlock(&dentry->d_lock);
        } else {
                kfree(redirect);
-               pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n",
+               pr_warn_ratelimited("failed to set redirect (%i)\n",
                                    err);
                /* Fall back to userspace copy-up */
                err = -EXDEV;
index 70e5558..6f54d70 100644 (file)
@@ -30,7 +30,7 @@ static int ovl_encode_maybe_copy_up(struct dentry *dentry)
        }
 
        if (err) {
-               pr_warn_ratelimited("overlayfs: failed to copy up on encode (%pd2, err=%i)\n",
+               pr_warn_ratelimited("failed to copy up on encode (%pd2, err=%i)\n",
                                    dentry, err);
        }
 
@@ -244,7 +244,7 @@ out:
        return err;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
+       pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
                            dentry, err, buflen, fh ? (int)fh->fb.len : 0,
                            fh ? fh->fb.type : 0);
        goto out;
@@ -358,7 +358,7 @@ static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx)
  */
 static struct dentry *ovl_lookup_real_one(struct dentry *connected,
                                          struct dentry *real,
-                                         struct ovl_layer *layer)
+                                         const struct ovl_layer *layer)
 {
        struct inode *dir = d_inode(connected);
        struct dentry *this, *parent = NULL;
@@ -406,7 +406,7 @@ out:
        return this;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
+       pr_warn_ratelimited("failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
                            real, layer->idx, connected, err);
        this = ERR_PTR(err);
        goto out;
@@ -414,17 +414,16 @@ fail:
 
 static struct dentry *ovl_lookup_real(struct super_block *sb,
                                      struct dentry *real,
-                                     struct ovl_layer *layer);
+                                     const struct ovl_layer *layer);
 
 /*
  * Lookup an indexed or hashed overlay dentry by real inode.
  */
 static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
                                            struct dentry *real,
-                                           struct ovl_layer *layer)
+                                           const struct ovl_layer *layer)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
-       struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
        struct dentry *index = NULL;
        struct dentry *this = NULL;
        struct inode *inode;
@@ -466,7 +465,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
                 * recursive call walks back from indexed upper to the topmost
                 * connected/hashed upper parent (or up to root).
                 */
-               this = ovl_lookup_real(sb, upper, &upper_layer);
+               this = ovl_lookup_real(sb, upper, &ofs->layers[0]);
                dput(upper);
        }
 
@@ -487,7 +486,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
  */
 static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb,
                                               struct dentry *real,
-                                              struct ovl_layer *layer)
+                                              const struct ovl_layer *layer)
 {
        struct dentry *next, *parent = NULL;
        struct dentry *ancestor = ERR_PTR(-EIO);
@@ -540,7 +539,7 @@ static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb,
  */
 static struct dentry *ovl_lookup_real(struct super_block *sb,
                                      struct dentry *real,
-                                     struct ovl_layer *layer)
+                                     const struct ovl_layer *layer)
 {
        struct dentry *connected;
        int err = 0;
@@ -631,7 +630,7 @@ static struct dentry *ovl_lookup_real(struct super_block *sb,
        return connected;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
+       pr_warn_ratelimited("failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
                            real, layer->idx, connected, err);
        dput(connected);
        return ERR_PTR(err);
@@ -646,8 +645,7 @@ static struct dentry *ovl_get_dentry(struct super_block *sb,
                                     struct dentry *index)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
-       struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
-       struct ovl_layer *layer = upper ? &upper_layer : lowerpath->layer;
+       const struct ovl_layer *layer = upper ? &ofs->layers[0] : lowerpath->layer;
        struct dentry *real = upper ?: (index ?: lowerpath->dentry);
 
        /*
@@ -822,7 +820,7 @@ out:
        return dentry;
 
 out_err:
-       pr_warn_ratelimited("overlayfs: failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
+       pr_warn_ratelimited("failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
                            fh_len, fh_type, flags, err);
        dentry = ERR_PTR(err);
        goto out;
@@ -831,7 +829,7 @@ out_err:
 static struct dentry *ovl_fh_to_parent(struct super_block *sb, struct fid *fid,
                                       int fh_len, int fh_type)
 {
-       pr_warn_ratelimited("overlayfs: connectable file handles not supported; use 'no_subtree_check' exportfs option.\n");
+       pr_warn_ratelimited("connectable file handles not supported; use 'no_subtree_check' exportfs option.\n");
        return ERR_PTR(-EACCES);
 }
 
index e235a63..a531721 100644 (file)
@@ -9,8 +9,19 @@
 #include <linux/xattr.h>
 #include <linux/uio.h>
 #include <linux/uaccess.h>
+#include <linux/splice.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
 #include "overlayfs.h"
 
+struct ovl_aio_req {
+       struct kiocb iocb;
+       struct kiocb *orig_iocb;
+       struct fd fd;
+};
+
+static struct kmem_cache *ovl_aio_request_cachep;
+
 static char ovl_whatisit(struct inode *inode, struct inode *realinode)
 {
        if (realinode != ovl_inode_upper(inode))
@@ -146,7 +157,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
        struct inode *inode = file_inode(file);
        struct fd real;
        const struct cred *old_cred;
-       ssize_t ret;
+       loff_t ret;
 
        /*
         * The two special cases below do not need to involve real fs,
@@ -171,7 +182,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
         * limitations that are more strict than ->s_maxbytes for specific
         * files, so we use the real file to perform seeks.
         */
-       inode_lock(inode);
+       ovl_inode_lock(inode);
        real.file->f_pos = file->f_pos;
 
        old_cred = ovl_override_creds(inode->i_sb);
@@ -179,7 +190,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
        revert_creds(old_cred);
 
        file->f_pos = real.file->f_pos;
-       inode_unlock(inode);
+       ovl_inode_unlock(inode);
 
        fdput(real);
 
@@ -225,6 +236,33 @@ static rwf_t ovl_iocb_to_rwf(struct kiocb *iocb)
        return flags;
 }
 
+static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
+{
+       struct kiocb *iocb = &aio_req->iocb;
+       struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+       if (iocb->ki_flags & IOCB_WRITE) {
+               struct inode *inode = file_inode(orig_iocb->ki_filp);
+
+               file_end_write(iocb->ki_filp);
+               ovl_copyattr(ovl_inode_real(inode), inode);
+       }
+
+       orig_iocb->ki_pos = iocb->ki_pos;
+       fdput(aio_req->fd);
+       kmem_cache_free(ovl_aio_request_cachep, aio_req);
+}
+
+static void ovl_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+{
+       struct ovl_aio_req *aio_req = container_of(iocb,
+                                                  struct ovl_aio_req, iocb);
+       struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+       ovl_aio_cleanup_handler(aio_req);
+       orig_iocb->ki_complete(orig_iocb, res, res2);
+}
+
 static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *file = iocb->ki_filp;
@@ -240,10 +278,28 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                return ret;
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
-       ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
-                           ovl_iocb_to_rwf(iocb));
+       if (is_sync_kiocb(iocb)) {
+               ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
+                                   ovl_iocb_to_rwf(iocb));
+       } else {
+               struct ovl_aio_req *aio_req;
+
+               ret = -ENOMEM;
+               aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
+               if (!aio_req)
+                       goto out;
+
+               aio_req->fd = real;
+               real.flags = 0;
+               aio_req->orig_iocb = iocb;
+               kiocb_clone(&aio_req->iocb, iocb, real.file);
+               aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+               ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
+               if (ret != -EIOCBQUEUED)
+                       ovl_aio_cleanup_handler(aio_req);
+       }
+out:
        revert_creds(old_cred);
-
        ovl_file_accessed(file);
 
        fdput(real);
@@ -274,15 +330,33 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                goto out_unlock;
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
-       file_start_write(real.file);
-       ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
-                            ovl_iocb_to_rwf(iocb));
-       file_end_write(real.file);
+       if (is_sync_kiocb(iocb)) {
+               file_start_write(real.file);
+               ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
+                                    ovl_iocb_to_rwf(iocb));
+               file_end_write(real.file);
+               /* Update size */
+               ovl_copyattr(ovl_inode_real(inode), inode);
+       } else {
+               struct ovl_aio_req *aio_req;
+
+               ret = -ENOMEM;
+               aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
+               if (!aio_req)
+                       goto out;
+
+               file_start_write(real.file);
+               aio_req->fd = real;
+               real.flags = 0;
+               aio_req->orig_iocb = iocb;
+               kiocb_clone(&aio_req->iocb, iocb, real.file);
+               aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+               ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
+               if (ret != -EIOCBQUEUED)
+                       ovl_aio_cleanup_handler(aio_req);
+       }
+out:
        revert_creds(old_cred);
-
-       /* Update size */
-       ovl_copyattr(ovl_inode_real(inode), inode);
-
        fdput(real);
 
 out_unlock:
@@ -291,6 +365,48 @@ out_unlock:
        return ret;
 }
 
+static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
+                        struct pipe_inode_info *pipe, size_t len,
+                        unsigned int flags)
+{
+       ssize_t ret;
+       struct fd real;
+       const struct cred *old_cred;
+
+       ret = ovl_real_fdget(in, &real);
+       if (ret)
+               return ret;
+
+       old_cred = ovl_override_creds(file_inode(in)->i_sb);
+       ret = generic_file_splice_read(real.file, ppos, pipe, len, flags);
+       revert_creds(old_cred);
+
+       ovl_file_accessed(in);
+       fdput(real);
+       return ret;
+}
+
+static ssize_t
+ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+                         loff_t *ppos, size_t len, unsigned int flags)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       ssize_t ret;
+
+       ret = ovl_real_fdget(out, &real);
+       if (ret)
+               return ret;
+
+       old_cred = ovl_override_creds(file_inode(out)->i_sb);
+       ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+       revert_creds(old_cred);
+
+       ovl_file_accessed(out);
+       fdput(real);
+       return ret;
+}
+
 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct fd real;
@@ -647,7 +763,25 @@ const struct file_operations ovl_file_operations = {
        .fadvise        = ovl_fadvise,
        .unlocked_ioctl = ovl_ioctl,
        .compat_ioctl   = ovl_compat_ioctl,
+       .splice_read    = ovl_splice_read,
+       .splice_write   = ovl_splice_write,
 
        .copy_file_range        = ovl_copy_file_range,
        .remap_file_range       = ovl_remap_file_range,
 };
+
+int __init ovl_aio_request_cache_init(void)
+{
+       ovl_aio_request_cachep = kmem_cache_create("ovl_aio_req",
+                                                  sizeof(struct ovl_aio_req),
+                                                  0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!ovl_aio_request_cachep)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void ovl_aio_request_cache_destroy(void)
+{
+       kmem_cache_destroy(ovl_aio_request_cachep);
+}
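
The AIO support above allocates one ovl_aio_req per in-flight request from a dedicated slab cache created at init and destroyed on module exit. A minimal sketch of that allocate/complete lifecycle with made-up names (not the overlayfs code itself):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct demo_req {
	void *owner;		/* whatever per-request state is needed */
};

static struct kmem_cache *demo_req_cachep;

static int __init demo_cache_init(void)
{
	demo_req_cachep = kmem_cache_create("demo_req", sizeof(struct demo_req),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	return demo_req_cachep ? 0 : -ENOMEM;
}

/* One object per in-flight request, zeroed on allocation. */
static struct demo_req *demo_req_alloc(void)
{
	return kmem_cache_zalloc(demo_req_cachep, GFP_KERNEL);
}

/* Called from the completion path, mirroring ovl_aio_cleanup_handler(). */
static void demo_req_free(struct demo_req *req)
{
	kmem_cache_free(demo_req_cachep, req);
}

static void demo_cache_destroy(void)
{
	kmem_cache_destroy(demo_req_cachep);
}
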
index b045cf1..79e8994 100644 (file)
@@ -75,10 +75,9 @@ out:
        return err;
 }
 
-static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
-                          struct ovl_layer *lower_layer)
+static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
 {
-       bool samefs = ovl_same_sb(dentry->d_sb);
+       bool samefs = ovl_same_fs(dentry->d_sb);
        unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
 
        if (samefs) {
@@ -100,12 +99,10 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
                 * persistent for a given layer configuration.
                 */
                if (stat->ino >> shift) {
-                       pr_warn_ratelimited("overlayfs: inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
+                       pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
                                            dentry, stat->ino, xinobits);
                } else {
-                       if (lower_layer)
-                               stat->ino |= ((u64)lower_layer->fsid) << shift;
-
+                       stat->ino |= ((u64)fsid) << shift;
                        stat->dev = dentry->d_sb->s_dev;
                        return 0;
                }
@@ -124,15 +121,14 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
                 */
                stat->dev = dentry->d_sb->s_dev;
                stat->ino = dentry->d_inode->i_ino;
-       } else if (lower_layer && lower_layer->fsid) {
+       } else {
                /*
                 * For non-samefs setup, if we cannot map all layers st_ino
                 * to a unified address space, we need to make sure that st_dev
-                * is unique per lower fs. Upper layer uses real st_dev and
-                * lower layers use the unique anonymous bdev assigned to the
-                * lower fs.
+                * is unique per underlying fs, so we use the unique anonymous
+                * bdev assigned to the underlying fs.
                 */
-               stat->dev = lower_layer->fs->pseudo_dev;
+               stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
        }
 
        return 0;
@@ -146,8 +142,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
        struct path realpath;
        const struct cred *old_cred;
        bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
-       bool samefs = ovl_same_sb(dentry->d_sb);
-       struct ovl_layer *lower_layer = NULL;
+       int fsid = 0;
        int err;
        bool metacopy_blocks = false;
 
@@ -168,9 +163,9 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
         * If lower filesystem supports NFS file handles, this also guarantees
         * persistent st_ino across mount cycle.
         */
-       if (!is_dir || samefs || ovl_xino_bits(dentry->d_sb)) {
+       if (!is_dir || ovl_same_dev(dentry->d_sb)) {
                if (!OVL_TYPE_UPPER(type)) {
-                       lower_layer = ovl_layer_lower(dentry);
+                       fsid = ovl_layer_lower(dentry)->fsid;
                } else if (OVL_TYPE_ORIGIN(type)) {
                        struct kstat lowerstat;
                        u32 lowermask = STATX_INO | STATX_BLOCKS |
@@ -200,14 +195,8 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
                        if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
                            (!ovl_verify_lower(dentry->d_sb) &&
                             (is_dir || lowerstat.nlink == 1))) {
-                               lower_layer = ovl_layer_lower(dentry);
-                               /*
-                                * Cannot use origin st_dev;st_ino because
-                                * origin inode content may differ from overlay
-                                * inode content.
-                                */
-                               if (samefs || lower_layer->fsid)
-                                       stat->ino = lowerstat.ino;
+                               fsid = ovl_layer_lower(dentry)->fsid;
+                               stat->ino = lowerstat.ino;
                        }
 
                        /*
@@ -241,7 +230,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
                }
        }
 
-       err = ovl_map_dev_ino(dentry, stat, lower_layer);
+       err = ovl_map_dev_ino(dentry, stat, fsid);
        if (err)
                goto out;
 
@@ -527,6 +516,27 @@ static const struct address_space_operations ovl_aops = {
  * [...] &ovl_i_mutex_dir_key[depth]   (stack_depth=2)
  * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
  * [...] &type->i_mutex_dir_key        (stack_depth=0)
+ *
+ * Locking order w.r.t. ovl_want_write() is important for nested overlayfs.
+ *
+ * This chain is valid:
+ * - inode->i_rwsem                    (inode_lock[2])
+ * - upper_mnt->mnt_sb->s_writers      (ovl_want_write[0])
+ * - OVL_I(inode)->lock                        (ovl_inode_lock[2])
+ * - OVL_I(lowerinode)->lock           (ovl_inode_lock[1])
+ *
+ * And this chain is valid:
+ * - inode->i_rwsem                    (inode_lock[2])
+ * - OVL_I(inode)->lock                        (ovl_inode_lock[2])
+ * - lowerinode->i_rwsem               (inode_lock[1])
+ * - OVL_I(lowerinode)->lock           (ovl_inode_lock[1])
+ *
+ * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
+ * held, because it is in reverse order of the non-nested case using the same
+ * upper fs:
+ * - inode->i_rwsem                    (inode_lock[1])
+ * - upper_mnt->mnt_sb->s_writers      (ovl_want_write[0])
+ * - OVL_I(inode)->lock                        (ovl_inode_lock[1])
  */
 #define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH
 
@@ -565,7 +575,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
         * ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
         * upper inode i_ino on ovl_inode_init() or ovl_inode_update().
         */
-       if (ovl_same_sb(inode->i_sb) || xinobits) {
+       if (ovl_same_dev(inode->i_sb)) {
                inode->i_ino = ino;
                if (xinobits && fsid && !(ino >> (64 - xinobits)))
                        inode->i_ino |= (unsigned long)fsid << (64 - xinobits);
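
The hunk above is where the xino encoding actually happens: when the layers do not all live on one fs but the real inode number leaves enough high bits unused, ovl_fill_inode() packs the layer's fsid into those bits so every overlay inode keeps a unique st_ino on a single st_dev. A stand-alone sketch of that packing, assuming a 64-bit inode number (names are illustrative, not taken from the patch):

/* Sketch (not overlayfs code): pack a small fsid into the unused high
 * bits of a 64-bit inode number, the same check-then-OR pattern used by
 * ovl_fill_inode() above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t xino_pack(uint64_t real_ino, unsigned int fsid, int xinobits)
{
	/* Only safe when the real inode number leaves the high bits unused. */
	if (xinobits && fsid && !(real_ino >> (64 - xinobits)))
		return real_ino | ((uint64_t)fsid << (64 - xinobits));
	return real_ino;	/* otherwise keep the unmodified number */
}

int main(void)
{
	/* Two reserved bits, fsid 1: 0x1234 -> 0x4000000000001234. */
	printf("%#llx\n", (unsigned long long)xino_pack(0x1234, 1, 2));
	return 0;
}
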
@@ -698,7 +708,7 @@ unsigned int ovl_get_nlink(struct dentry *lowerdentry,
        return nlink;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to get index nlink (%pd2, err=%i)\n",
+       pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
                            upperdentry, err);
        return fallback;
 }
@@ -969,7 +979,7 @@ out:
        return inode;
 
 out_err:
-       pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err);
+       pr_warn_ratelimited("failed to get inode (%i)\n", err);
        inode = ERR_PTR(err);
        goto out;
 }
index 76ff663..ed9e129 100644 (file)
@@ -141,10 +141,10 @@ out:
        return NULL;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to get origin (%i)\n", res);
+       pr_warn_ratelimited("failed to get origin (%i)\n", res);
        goto out;
 invalid:
-       pr_warn_ratelimited("overlayfs: invalid origin (%*phN)\n", res, fh);
+       pr_warn_ratelimited("invalid origin (%*phN)\n", res, fh);
        goto out;
 }
 
@@ -322,16 +322,16 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
        struct dentry *origin = NULL;
        int i;
 
-       for (i = 0; i < ofs->numlower; i++) {
+       for (i = 1; i < ofs->numlayer; i++) {
                /*
                 * If lower fs uuid is not unique among lower fs we cannot match
                 * fh->uuid to layer.
                 */
-               if (ofs->lower_layers[i].fsid &&
-                   ofs->lower_layers[i].fs->bad_uuid)
+               if (ofs->layers[i].fsid &&
+                   ofs->layers[i].fs->bad_uuid)
                        continue;
 
-               origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt,
+               origin = ovl_decode_real_fh(fh, ofs->layers[i].mnt,
                                            connected);
                if (origin)
                        break;
@@ -354,13 +354,13 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
        }
        **stackp = (struct ovl_path){
                .dentry = origin,
-               .layer = &ofs->lower_layers[i]
+               .layer = &ofs->layers[i]
        };
 
        return 0;
 
 invalid:
-       pr_warn_ratelimited("overlayfs: invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
+       pr_warn_ratelimited("invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
                            upperdentry, d_inode(upperdentry)->i_mode & S_IFMT,
                            d_inode(origin)->i_mode & S_IFMT);
        dput(origin);
@@ -449,7 +449,7 @@ out:
 
 fail:
        inode = d_inode(real);
-       pr_warn_ratelimited("overlayfs: failed to verify %s (%pd2, ino=%lu, err=%i)\n",
+       pr_warn_ratelimited("failed to verify %s (%pd2, ino=%lu, err=%i)\n",
                            is_upper ? "upper" : "origin", real,
                            inode ? inode->i_ino : 0, err);
        goto out;
@@ -475,7 +475,7 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
                return upper ?: ERR_PTR(-ESTALE);
 
        if (!d_is_dir(upper)) {
-               pr_warn_ratelimited("overlayfs: invalid index upper (%pd2, upper=%pd2).\n",
+               pr_warn_ratelimited("invalid index upper (%pd2, upper=%pd2).\n",
                                    index, upper);
                dput(upper);
                return ERR_PTR(-EIO);
@@ -589,12 +589,12 @@ out:
        return err;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to verify index (%pd2, ftype=%x, err=%i)\n",
+       pr_warn_ratelimited("failed to verify index (%pd2, ftype=%x, err=%i)\n",
                            index, d_inode(index)->i_mode & S_IFMT, err);
        goto out;
 
 orphan:
-       pr_warn_ratelimited("overlayfs: orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
+       pr_warn_ratelimited("orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
                            index, d_inode(index)->i_mode & S_IFMT,
                            d_inode(index)->i_nlink);
        err = -ENOENT;
@@ -696,7 +696,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                        index = NULL;
                        goto out;
                }
-               pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
+               pr_warn_ratelimited("failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
                                    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
                                    d_inode(origin)->i_ino, name.len, name.name,
                                    err);
@@ -723,13 +723,13 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                 * unlinked, which means that finding a lower origin on lookup
                 * whose index is a whiteout should be treated as an error.
                 */
-               pr_warn_ratelimited("overlayfs: bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
+               pr_warn_ratelimited("bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
                                    index, d_inode(index)->i_mode & S_IFMT,
                                    d_inode(origin)->i_mode & S_IFMT);
                goto fail;
        } else if (is_dir && verify) {
                if (!upper) {
-                       pr_warn_ratelimited("overlayfs: suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
+                       pr_warn_ratelimited("suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
                                            origin, index);
                        goto fail;
                }
@@ -738,7 +738,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                err = ovl_verify_upper(index, upper, false);
                if (err) {
                        if (err == -ESTALE) {
-                               pr_warn_ratelimited("overlayfs: suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
+                               pr_warn_ratelimited("suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
                                                    upper, origin, index);
                        }
                        goto fail;
@@ -885,7 +885,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
        if (!d.stop && poe->numlower) {
                err = -ENOMEM;
-               stack = kcalloc(ofs->numlower, sizeof(struct ovl_path),
+               stack = kcalloc(ofs->numlayer - 1, sizeof(struct ovl_path),
                                GFP_KERNEL);
                if (!stack)
                        goto out_put_upper;
@@ -967,7 +967,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                 */
                err = -EPERM;
                if (d.redirect && !ofs->config.redirect_follow) {
-                       pr_warn_ratelimited("overlayfs: refusing to follow redirect for (%pd2)\n",
+                       pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n",
                                            dentry);
                        goto out_put;
                }
@@ -994,7 +994,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
                err = -EPERM;
                if (!ofs->config.metacopy) {
-                       pr_warn_ratelimited("overlay: refusing to follow metacopy origin for (%pd2)\n",
+                       pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n",
                                            dentry);
                        goto out_put;
                }
index f283b1d..3623d28 100644 (file)
@@ -9,6 +9,9 @@
 #include <linux/fs.h>
 #include "ovl_entry.h"
 
+#undef pr_fmt
+#define pr_fmt(fmt) "overlayfs: " fmt
+
 enum ovl_path_type {
        __OVL_PATH_UPPER        = (1 << 0),
        __OVL_PATH_MERGE        = (1 << 1),
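
The single pr_fmt() definition above is what makes all the "overlayfs: " prefix removals in this series safe: pr_err()/pr_warn()/pr_info() expand their format string through pr_fmt(), so every message printed from a file that includes this shared header keeps the prefix automatically. A minimal user-space analogue of the mechanism (the fprintf wrapper only stands in for the kernel's printk-based macros):

/* Sketch: how a pr_fmt()-style prefix macro composes with the logging
 * macros, letting call sites drop the hand-written prefix. */
#include <stdio.h>

#define pr_fmt(fmt) "overlayfs: " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("missing '%s'\n", "lowerdir");	/* -> "overlayfs: missing 'lowerdir'" */
	return 0;
}
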
@@ -221,7 +224,6 @@ int ovl_want_write(struct dentry *dentry);
 void ovl_drop_write(struct dentry *dentry);
 struct dentry *ovl_workdir(struct dentry *dentry);
 const struct cred *ovl_override_creds(struct super_block *sb);
-struct super_block *ovl_same_sb(struct super_block *sb);
 int ovl_can_decode_fh(struct super_block *sb);
 struct dentry *ovl_indexdir(struct super_block *sb);
 bool ovl_index_all(struct super_block *sb);
@@ -237,7 +239,7 @@ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
-struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
+const struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct dentry *ovl_i_dentry_upper(struct inode *inode);
 struct inode *ovl_inode_upper(struct inode *inode);
@@ -299,11 +301,21 @@ static inline bool ovl_is_impuredir(struct dentry *dentry)
        return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
 }
 
-static inline unsigned int ovl_xino_bits(struct super_block *sb)
+/* All layers on same fs? */
+static inline bool ovl_same_fs(struct super_block *sb)
+{
+       return OVL_FS(sb)->xino_mode == 0;
+}
+
+/* All overlay inodes have same st_dev? */
+static inline bool ovl_same_dev(struct super_block *sb)
 {
-       struct ovl_fs *ofs = sb->s_fs_info;
+       return OVL_FS(sb)->xino_mode >= 0;
+}
 
-       return ofs->xino_bits;
+static inline unsigned int ovl_xino_bits(struct super_block *sb)
+{
+       return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
 }
 
 static inline int ovl_inode_lock(struct inode *inode)
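
ovl_same_fs(), ovl_same_dev() and ovl_xino_bits() all key off the single xino_mode field added later in this series to ovl_entry.h (-1: xino disabled, 0: all layers on one fs, >0: number of reserved high inode bits). A compact, runnable restatement of those predicates, for reference only:

#include <stdio.h>

/* Sketch of the xino_mode convention the helpers above are built on:
 *   -1  xino disabled, each lower fs keeps its own pseudo st_dev
 *    0  all layers on the same fs, st_ino already unique
 *   >0  that many high i_ino bits carry the layer's fsid
 */
static int same_fs(int xino_mode)   { return xino_mode == 0; }
static int same_dev(int xino_mode)  { return xino_mode >= 0; }
static int xino_bits(int xino_mode) { return same_dev(xino_mode) ? xino_mode : 0; }

int main(void)
{
	int modes[] = { -1, 0, 2 };

	for (int i = 0; i < 3; i++)
		printf("xino_mode=%2d  same_fs=%d same_dev=%d bits=%d\n",
		       modes[i], same_fs(modes[i]), same_dev(modes[i]),
		       xino_bits(modes[i]));
	return 0;
}
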
@@ -438,6 +450,8 @@ struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr);
 
 /* file.c */
 extern const struct file_operations ovl_file_operations;
+int __init ovl_aio_request_cache_init(void);
+void ovl_aio_request_cache_destroy(void);
 
 /* copy_up.c */
 int ovl_copy_up(struct dentry *dentry);
index 28348c4..89015ea 100644 (file)
@@ -24,6 +24,8 @@ struct ovl_sb {
        dev_t pseudo_dev;
        /* Unusable (conflicting) uuid */
        bool bad_uuid;
+       /* Used as a lower layer (but maybe also as upper) */
+       bool is_lower;
 };
 
 struct ovl_layer {
@@ -38,18 +40,18 @@ struct ovl_layer {
 };
 
 struct ovl_path {
-       struct ovl_layer *layer;
+       const struct ovl_layer *layer;
        struct dentry *dentry;
 };
 
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
        struct vfsmount *upper_mnt;
-       unsigned int numlower;
-       /* Number of unique lower sb that differ from upper sb */
-       unsigned int numlowerfs;
-       struct ovl_layer *lower_layers;
-       struct ovl_sb *lower_fs;
+       unsigned int numlayer;
+       /* Number of unique fs among layers including upper fs */
+       unsigned int numfs;
+       const struct ovl_layer *layers;
+       struct ovl_sb *fs;
        /* workbasedir is the path at workdir= mount option */
        struct dentry *workbasedir;
        /* workdir is the 'work' directory under workbasedir */
@@ -71,10 +73,15 @@ struct ovl_fs {
        struct inode *workbasedir_trap;
        struct inode *workdir_trap;
        struct inode *indexdir_trap;
-       /* Inode numbers in all layers do not use the high xino_bits */
-       unsigned int xino_bits;
+       /* -1: disabled, 0: same fs, 1..32: number of unused ino bits */
+       int xino_mode;
 };
 
+static inline struct ovl_fs *OVL_FS(struct super_block *sb)
+{
+       return (struct ovl_fs *)sb->s_fs_info;
+}
+
 /* private information held for every overlayfs dentry */
 struct ovl_entry {
        union {
index 47a91c9..40ac9ce 100644 (file)
@@ -441,7 +441,7 @@ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
                               const char *name, int namelen)
 {
        if (ino >> (64 - xinobits)) {
-               pr_warn_ratelimited("overlayfs: d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
+               pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
                                    namelen, name, ino, xinobits);
                return ino;
        }
@@ -469,7 +469,7 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
        int xinobits = ovl_xino_bits(dir->d_sb);
        int err = 0;
 
-       if (!ovl_same_sb(dir->d_sb) && !xinobits)
+       if (!ovl_same_dev(dir->d_sb))
                goto out;
 
        if (p->name[0] == '.') {
@@ -504,7 +504,13 @@ get:
                if (err)
                        goto fail;
 
-               WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
+               /*
+                * Directory inode is always on overlay st_dev.
+                * Non-dir with ovl_same_dev() could be on pseudo st_dev in case
+                * of xino bits overflow.
+                */
+               WARN_ON_ONCE(S_ISDIR(stat.mode) &&
+                            dir->d_sb->s_dev != stat.dev);
                ino = stat.ino;
        } else if (xinobits && !OVL_TYPE_UPPER(type)) {
                ino = ovl_remap_lower_ino(ino, xinobits,
@@ -518,7 +524,7 @@ out:
        return err;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
+       pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n",
                            p->name, err);
        goto out;
 }
@@ -685,7 +691,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
        int err;
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dir = file->f_path.dentry;
-       struct ovl_layer *lower_layer = ovl_layer_lower(dir);
+       const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
        struct ovl_readdir_translate rdt = {
                .ctx.actor = ovl_fill_real,
                .orig_ctx = ctx,
@@ -738,7 +744,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                 * entries.
                 */
                if (ovl_xino_bits(dentry->d_sb) ||
-                   (ovl_same_sb(dentry->d_sb) &&
+                   (ovl_same_fs(dentry->d_sb) &&
                     (ovl_is_impure_dir(file) ||
                      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
                        return ovl_iterate_real(file, ctx);
@@ -965,7 +971,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
 
                dentry = lookup_one_len(p->name, upper, p->len);
                if (IS_ERR(dentry)) {
-                       pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
+                       pr_err("lookup '%s/%.*s' failed (%i)\n",
                               upper->d_name.name, p->len, p->name,
                               (int) PTR_ERR(dentry));
                        continue;
@@ -1147,6 +1153,6 @@ next:
 out:
        ovl_cache_free(&list);
        if (err)
-               pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
+               pr_err("failed index dir cleanup (%i)\n", err);
        return err;
 }
index 7621ff1..319fe0d 100644 (file)
@@ -224,14 +224,14 @@ static void ovl_free_fs(struct ovl_fs *ofs)
        if (ofs->upperdir_locked)
                ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
        mntput(ofs->upper_mnt);
-       for (i = 0; i < ofs->numlower; i++) {
-               iput(ofs->lower_layers[i].trap);
-               mntput(ofs->lower_layers[i].mnt);
+       for (i = 1; i < ofs->numlayer; i++) {
+               iput(ofs->layers[i].trap);
+               mntput(ofs->layers[i].mnt);
        }
-       for (i = 0; i < ofs->numlowerfs; i++)
-               free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
-       kfree(ofs->lower_layers);
-       kfree(ofs->lower_fs);
+       kfree(ofs->layers);
+       for (i = 0; i < ofs->numfs; i++)
+               free_anon_bdev(ofs->fs[i].pseudo_dev);
+       kfree(ofs->fs);
 
        kfree(ofs->config.lowerdir);
        kfree(ofs->config.upperdir);
@@ -358,7 +358,7 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
        if (ofs->config.nfs_export != ovl_nfs_export_def)
                seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
                                                "on" : "off");
-       if (ofs->config.xino != ovl_xino_def())
+       if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(sb))
                seq_printf(m, ",xino=%s", ovl_xino_str[ofs->config.xino]);
        if (ofs->config.metacopy != ovl_metacopy_def)
                seq_printf(m, ",metacopy=%s",
@@ -462,7 +462,7 @@ static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
                if (ovl_redirect_always_follow)
                        config->redirect_follow = true;
        } else if (strcmp(mode, "nofollow") != 0) {
-               pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n",
+               pr_err("bad mount option \"redirect_dir=%s\"\n",
                       mode);
                return -EINVAL;
        }
@@ -560,14 +560,15 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                        break;
 
                default:
-                       pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
+                       pr_err("unrecognized mount option \"%s\" or missing value\n",
+                                       p);
                        return -EINVAL;
                }
        }
 
        /* Workdir is useless in non-upper mount */
        if (!config->upperdir && config->workdir) {
-               pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
+               pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
                        config->workdir);
                kfree(config->workdir);
                config->workdir = NULL;
@@ -587,7 +588,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
        /* Resolve metacopy -> redirect_dir dependency */
        if (config->metacopy && !config->redirect_dir) {
                if (metacopy_opt && redirect_opt) {
-                       pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n",
+                       pr_err("conflicting options: metacopy=on,redirect_dir=%s\n",
                               config->redirect_mode);
                        return -EINVAL;
                }
@@ -596,7 +597,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                         * There was an explicit redirect_dir=... that resulted
                         * in this conflict.
                         */
-                       pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n",
+                       pr_info("disabling metacopy due to redirect_dir=%s\n",
                                config->redirect_mode);
                        config->metacopy = false;
                } else {
@@ -692,7 +693,7 @@ out_unlock:
 out_dput:
        dput(work);
 out_err:
-       pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
+       pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n",
                ofs->config.workdir, name, -err);
        work = NULL;
        goto out_unlock;
@@ -716,21 +717,21 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
        int err = -EINVAL;
 
        if (!*name) {
-               pr_err("overlayfs: empty lowerdir\n");
+               pr_err("empty lowerdir\n");
                goto out;
        }
        err = kern_path(name, LOOKUP_FOLLOW, path);
        if (err) {
-               pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+               pr_err("failed to resolve '%s': %i\n", name, err);
                goto out;
        }
        err = -EINVAL;
        if (ovl_dentry_weird(path->dentry)) {
-               pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+               pr_err("filesystem on '%s' not supported\n", name);
                goto out_put;
        }
        if (!d_is_dir(path->dentry)) {
-               pr_err("overlayfs: '%s' not a directory\n", name);
+               pr_err("'%s' not a directory\n", name);
                goto out_put;
        }
        return 0;
@@ -752,7 +753,7 @@ static int ovl_mount_dir(const char *name, struct path *path)
 
                if (!err)
                        if (ovl_dentry_remote(path->dentry)) {
-                               pr_err("overlayfs: filesystem on '%s' not supported as upperdir\n",
+                               pr_err("filesystem on '%s' not supported as upperdir\n",
                                       tmp);
                                path_put_init(path);
                                err = -EINVAL;
@@ -769,7 +770,7 @@ static int ovl_check_namelen(struct path *path, struct ovl_fs *ofs,
        int err = vfs_statfs(path, &statfs);
 
        if (err)
-               pr_err("overlayfs: statfs failed on '%s'\n", name);
+               pr_err("statfs failed on '%s'\n", name);
        else
                ofs->namelen = max(ofs->namelen, statfs.f_namelen);
 
@@ -804,13 +805,13 @@ static int ovl_lower_dir(const char *name, struct path *path,
             (ofs->config.index && ofs->config.upperdir)) && !fh_type) {
                ofs->config.index = false;
                ofs->config.nfs_export = false;
-               pr_warn("overlayfs: fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
+               pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
                        name);
        }
 
        /* Check if lower fs has 32bit inode numbers */
        if (fh_type != FILEID_INO32_GEN)
-               ofs->xino_bits = 0;
+               ofs->xino_mode = -1;
 
        return 0;
 
@@ -996,7 +997,7 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
        err = PTR_ERR_OR_ZERO(trap);
        if (err) {
                if (err == -ELOOP)
-                       pr_err("overlayfs: conflicting %s path\n", name);
+                       pr_err("conflicting %s path\n", name);
                return err;
        }
 
@@ -1013,11 +1014,11 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
 static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
 {
        if (ofs->config.index) {
-               pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+               pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
                       name);
                return -EBUSY;
        } else {
-               pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+               pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
                        name);
                return 0;
        }
@@ -1035,7 +1036,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
 
        /* Upper fs should not be r/o */
        if (sb_rdonly(upperpath->mnt->mnt_sb)) {
-               pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n");
+               pr_err("upper fs is r/o, try multi-lower layers mount\n");
                err = -EINVAL;
                goto out;
        }
@@ -1052,7 +1053,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
        upper_mnt = clone_private_mount(upperpath);
        err = PTR_ERR(upper_mnt);
        if (IS_ERR(upper_mnt)) {
-               pr_err("overlayfs: failed to clone upperpath\n");
+               pr_err("failed to clone upperpath\n");
                goto out;
        }
 
@@ -1108,7 +1109,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
         * kernel upgrade. So warn instead of erroring out.
         */
        if (!err)
-               pr_warn("overlayfs: upper fs needs to support d_type.\n");
+               pr_warn("upper fs needs to support d_type.\n");
 
        /* Check if upper/work fs supports O_TMPFILE */
        temp = ovl_do_tmpfile(ofs->workdir, S_IFREG | 0);
@@ -1116,7 +1117,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
        if (ofs->tmpfile)
                dput(temp);
        else
-               pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+               pr_warn("upper fs does not support tmpfile.\n");
 
        /*
         * Check if upper/work fs supports trusted.overlay.* xattr
@@ -1126,7 +1127,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
                ofs->noxattr = true;
                ofs->config.index = false;
                ofs->config.metacopy = false;
-               pr_warn("overlayfs: upper fs does not support xattr, falling back to index=off and metacopy=off.\n");
+               pr_warn("upper fs does not support xattr, falling back to index=off and metacopy=off.\n");
                err = 0;
        } else {
                vfs_removexattr(ofs->workdir, OVL_XATTR_OPAQUE);
@@ -1136,16 +1137,16 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
        fh_type = ovl_can_decode_fh(ofs->workdir->d_sb);
        if (ofs->config.index && !fh_type) {
                ofs->config.index = false;
-               pr_warn("overlayfs: upper fs does not support file handles, falling back to index=off.\n");
+               pr_warn("upper fs does not support file handles, falling back to index=off.\n");
        }
 
        /* Check if upper fs has 32bit inode numbers */
        if (fh_type != FILEID_INO32_GEN)
-               ofs->xino_bits = 0;
+               ofs->xino_mode = -1;
 
        /* NFS export of r/w mount depends on index */
        if (ofs->config.nfs_export && !ofs->config.index) {
-               pr_warn("overlayfs: NFS export requires \"index=on\", falling back to nfs_export=off.\n");
+               pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n");
                ofs->config.nfs_export = false;
        }
 out:
@@ -1165,11 +1166,11 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
 
        err = -EINVAL;
        if (upperpath->mnt != workpath.mnt) {
-               pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+               pr_err("workdir and upperdir must reside under the same mount\n");
                goto out;
        }
        if (!ovl_workdir_ok(workpath.dentry, upperpath->dentry)) {
-               pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+               pr_err("workdir and upperdir must be separate subtrees\n");
                goto out;
        }
 
@@ -1210,7 +1211,7 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
        err = ovl_verify_origin(upperpath->dentry, oe->lowerstack[0].dentry,
                                true);
        if (err) {
-               pr_err("overlayfs: failed to verify upper root origin\n");
+               pr_err("failed to verify upper root origin\n");
                goto out;
        }
 
@@ -1233,18 +1234,18 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
                        err = ovl_verify_set_fh(ofs->indexdir, OVL_XATTR_ORIGIN,
                                                upperpath->dentry, true, false);
                        if (err)
-                               pr_err("overlayfs: failed to verify index dir 'origin' xattr\n");
+                               pr_err("failed to verify index dir 'origin' xattr\n");
                }
                err = ovl_verify_upper(ofs->indexdir, upperpath->dentry, true);
                if (err)
-                       pr_err("overlayfs: failed to verify index dir 'upper' xattr\n");
+                       pr_err("failed to verify index dir 'upper' xattr\n");
 
                /* Cleanup bad/stale/orphan index entries */
                if (!err)
                        err = ovl_indexdir_cleanup(ofs);
        }
        if (err || !ofs->indexdir)
-               pr_warn("overlayfs: try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
+               pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
 
 out:
        mnt_drop_write(mnt);
@@ -1258,7 +1259,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
        if (!ofs->config.nfs_export && !ofs->upper_mnt)
                return true;
 
-       for (i = 0; i < ofs->numlowerfs; i++) {
+       for (i = 0; i < ofs->numfs; i++) {
                /*
                 * We use uuid to associate an overlay lower file handle with a
                 * lower layer, so we can accept lower fs with null uuid as long
@@ -1266,8 +1267,9 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
                 * if we detect multiple lower fs with the same uuid, we
                 * disable lower file handle decoding on all of them.
                 */
-               if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) {
-                       ofs->lower_fs[i].bad_uuid = true;
+               if (ofs->fs[i].is_lower &&
+                   uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
+                       ofs->fs[i].bad_uuid = true;
                        return false;
                }
        }
@@ -1283,13 +1285,9 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
        int err;
        bool bad_uuid = false;
 
-       /* fsid 0 is reserved for upper fs even with non upper overlay */
-       if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb)
-               return 0;
-
-       for (i = 0; i < ofs->numlowerfs; i++) {
-               if (ofs->lower_fs[i].sb == sb)
-                       return i + 1;
+       for (i = 0; i < ofs->numfs; i++) {
+               if (ofs->fs[i].sb == sb)
+                       return i;
        }
 
        if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
@@ -1297,7 +1295,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
                if (ofs->config.index || ofs->config.nfs_export) {
                        ofs->config.index = false;
                        ofs->config.nfs_export = false;
-                       pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+                       pr_warn("%s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
                                uuid_is_null(&sb->s_uuid) ? "null" :
                                                            "conflicting",
                                path->dentry);
@@ -1306,35 +1304,59 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 
        err = get_anon_bdev(&dev);
        if (err) {
-               pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
+               pr_err("failed to get anonymous bdev for lowerpath\n");
                return err;
        }
 
-       ofs->lower_fs[ofs->numlowerfs].sb = sb;
-       ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev;
-       ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid;
-       ofs->numlowerfs++;
+       ofs->fs[ofs->numfs].sb = sb;
+       ofs->fs[ofs->numfs].pseudo_dev = dev;
+       ofs->fs[ofs->numfs].bad_uuid = bad_uuid;
 
-       return ofs->numlowerfs;
+       return ofs->numfs++;
 }
 
-static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
-                               struct path *stack, unsigned int numlower)
+static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
+                         struct path *stack, unsigned int numlower)
 {
        int err;
        unsigned int i;
+       struct ovl_layer *layers;
 
        err = -ENOMEM;
-       ofs->lower_layers = kcalloc(numlower, sizeof(struct ovl_layer),
-                                   GFP_KERNEL);
-       if (ofs->lower_layers == NULL)
+       layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
+       if (!layers)
                goto out;
+       ofs->layers = layers;
 
-       ofs->lower_fs = kcalloc(numlower, sizeof(struct ovl_sb),
-                               GFP_KERNEL);
-       if (ofs->lower_fs == NULL)
+       ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL);
+       if (ofs->fs == NULL)
                goto out;
 
+       /* idx/fsid 0 are reserved for upper fs even with lower only overlay */
+       ofs->numfs++;
+
+       layers[0].mnt = ofs->upper_mnt;
+       layers[0].idx = 0;
+       layers[0].fsid = 0;
+       ofs->numlayer = 1;
+
+       /*
+        * All lower layers that share the same fs as the upper layer use the
+        * same pseudo_dev as the upper layer.  Allocate fs[0].pseudo_dev even
+        * for a lower-only overlay, to simplify ovl_free_fs().
+        * is_lower will be set if upper fs is shared with a lower layer.
+        */
+       err = get_anon_bdev(&ofs->fs[0].pseudo_dev);
+       if (err) {
+               pr_err("failed to get anonymous bdev for upper fs\n");
+               goto out;
+       }
+
+       if (ofs->upper_mnt) {
+               ofs->fs[0].sb = ofs->upper_mnt->mnt_sb;
+               ofs->fs[0].is_lower = false;
+       }
+
        for (i = 0; i < numlower; i++) {
                struct vfsmount *mnt;
                struct inode *trap;
@@ -1357,7 +1379,7 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
                mnt = clone_private_mount(&stack[i]);
                err = PTR_ERR(mnt);
                if (IS_ERR(mnt)) {
-                       pr_err("overlayfs: failed to clone lowerpath\n");
+                       pr_err("failed to clone lowerpath\n");
                        iput(trap);
                        goto out;
                }
@@ -1368,15 +1390,13 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
                 */
                mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
 
-               ofs->lower_layers[ofs->numlower].trap = trap;
-               ofs->lower_layers[ofs->numlower].mnt = mnt;
-               ofs->lower_layers[ofs->numlower].idx = i + 1;
-               ofs->lower_layers[ofs->numlower].fsid = fsid;
-               if (fsid) {
-                       ofs->lower_layers[ofs->numlower].fs =
-                               &ofs->lower_fs[fsid - 1];
-               }
-               ofs->numlower++;
+               layers[ofs->numlayer].trap = trap;
+               layers[ofs->numlayer].mnt = mnt;
+               layers[ofs->numlayer].idx = ofs->numlayer;
+               layers[ofs->numlayer].fsid = fsid;
+               layers[ofs->numlayer].fs = &ofs->fs[fsid];
+               ofs->numlayer++;
+               ofs->fs[fsid].is_lower = true;
        }
 
        /*
@@ -1387,22 +1407,23 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
         * bits reserved for fsid, it emits a warning and uses the original
         * inode number.
         */
-       if (!ofs->numlowerfs || (ofs->numlowerfs == 1 && !ofs->upper_mnt)) {
-               ofs->xino_bits = 0;
-               ofs->config.xino = OVL_XINO_OFF;
-       } else if (ofs->config.xino == OVL_XINO_ON && !ofs->xino_bits) {
+       if (ofs->numfs - !ofs->upper_mnt == 1) {
+               if (ofs->config.xino == OVL_XINO_ON)
+                       pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
+               ofs->xino_mode = 0;
+       } else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
                /*
-                * This is a roundup of number of bits needed for numlowerfs+1
-                * (i.e. ilog2(numlowerfs+1 - 1) + 1). fsid 0 is reserved for
-                * upper fs even with non upper overlay.
+                * This is a roundup of number of bits needed for encoding
+                * fsid, where fsid 0 is reserved for upper fs even with
+                * lower only overlay.
                 */
                BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 31);
-               ofs->xino_bits = ilog2(ofs->numlowerfs) + 1;
+               ofs->xino_mode = ilog2(ofs->numfs - 1) + 1;
        }
 
-       if (ofs->xino_bits) {
-               pr_info("overlayfs: \"xino\" feature enabled using %d upper inode bits.\n",
-                       ofs->xino_bits);
+       if (ofs->xino_mode > 0) {
+               pr_info("\"xino\" feature enabled using %d upper inode bits.\n",
+                       ofs->xino_mode);
        }
 
        err = 0;
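
The number of reserved bits is a round-up of log2 over the highest fsid: with numfs unique filesystems the fsids run from 0 (upper) to numfs-1, so ilog2(numfs - 1) + 1 bits suffice. A user-space sketch of the same calculation, where ilog2_() stands in for the kernel's ilog2() on a non-zero value:

#include <stdio.h>

/* floor(log2(v)) for v > 0, mimicking the kernel's ilog2(). */
static int ilog2_(unsigned int v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	for (unsigned int numfs = 2; numfs <= 5; numfs++)
		printf("numfs=%u -> %d xino bits\n", numfs, ilog2_(numfs - 1) + 1);
	return 0;
}
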
@@ -1428,15 +1449,15 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
        err = -EINVAL;
        stacklen = ovl_split_lowerdirs(lowertmp);
        if (stacklen > OVL_MAX_STACK) {
-               pr_err("overlayfs: too many lower directories, limit is %d\n",
+               pr_err("too many lower directories, limit is %d\n",
                       OVL_MAX_STACK);
                goto out_err;
        } else if (!ofs->config.upperdir && stacklen == 1) {
-               pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n");
+               pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
                goto out_err;
        } else if (!ofs->config.upperdir && ofs->config.nfs_export &&
                   ofs->config.redirect_follow) {
-               pr_warn("overlayfs: NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
+               pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
                ofs->config.nfs_export = false;
        }
 
@@ -1459,11 +1480,11 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
        err = -EINVAL;
        sb->s_stack_depth++;
        if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
-               pr_err("overlayfs: maximum fs stacking depth exceeded\n");
+               pr_err("maximum fs stacking depth exceeded\n");
                goto out_err;
        }
 
-       err = ovl_get_lower_layers(sb, ofs, stack, numlower);
+       err = ovl_get_layers(sb, ofs, stack, numlower);
        if (err)
                goto out_err;
 
@@ -1474,7 +1495,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
 
        for (i = 0; i < numlower; i++) {
                oe->lowerstack[i].dentry = dget(stack[i].dentry);
-               oe->lowerstack[i].layer = &ofs->lower_layers[i];
+               oe->lowerstack[i].layer = &ofs->layers[i+1];
        }
 
        if (remote)
@@ -1515,7 +1536,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
        while (!err && parent != next) {
                if (ovl_lookup_trap_inode(sb, parent)) {
                        err = -ELOOP;
-                       pr_err("overlayfs: overlapping %s path\n", name);
+                       pr_err("overlapping %s path\n", name);
                } else if (ovl_is_inuse(parent)) {
                        err = ovl_report_in_use(ofs, name);
                }
@@ -1555,9 +1576,9 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
                        return err;
        }
 
-       for (i = 0; i < ofs->numlower; i++) {
+       for (i = 1; i < ofs->numlayer; i++) {
                err = ovl_check_layer(sb, ofs,
-                                     ofs->lower_layers[i].mnt->mnt_root,
+                                     ofs->layers[i].mnt->mnt_root,
                                      "lowerdir");
                if (err)
                        return err;
@@ -1595,7 +1616,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        err = -EINVAL;
        if (!ofs->config.lowerdir) {
                if (!silent)
-                       pr_err("overlayfs: missing 'lowerdir'\n");
+                       pr_err("missing 'lowerdir'\n");
                goto out_err;
        }
 
@@ -1603,14 +1624,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        /* Assume underlying fs uses 32bit inodes unless proven otherwise */
        if (ofs->config.xino != OVL_XINO_OFF)
-               ofs->xino_bits = BITS_PER_LONG - 32;
+               ofs->xino_mode = BITS_PER_LONG - 32;
 
        /* alloc/destroy_inode needed for setting up traps in inode cache */
        sb->s_op = &ovl_super_operations;
 
        if (ofs->config.upperdir) {
                if (!ofs->config.workdir) {
-                       pr_err("overlayfs: missing 'workdir'\n");
+                       pr_err("missing 'workdir'\n");
                        goto out_err;
                }
 
@@ -1660,13 +1681,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (!ofs->indexdir) {
                ofs->config.index = false;
                if (ofs->upper_mnt && ofs->config.nfs_export) {
-                       pr_warn("overlayfs: NFS export requires an index dir, falling back to nfs_export=off.\n");
+                       pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n");
                        ofs->config.nfs_export = false;
                }
        }
 
        if (ofs->config.metacopy && ofs->config.nfs_export) {
-               pr_warn("overlayfs: NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
+               pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
                ofs->config.nfs_export = false;
        }
 
@@ -1749,9 +1770,15 @@ static int __init ovl_init(void)
        if (ovl_inode_cachep == NULL)
                return -ENOMEM;
 
-       err = register_filesystem(&ovl_fs_type);
-       if (err)
-               kmem_cache_destroy(ovl_inode_cachep);
+       err = ovl_aio_request_cache_init();
+       if (!err) {
+               err = register_filesystem(&ovl_fs_type);
+               if (!err)
+                       return 0;
+
+               ovl_aio_request_cache_destroy();
+       }
+       kmem_cache_destroy(ovl_inode_cachep);
 
        return err;
 }
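
The reworked ovl_init() unwinds in reverse order on failure: if register_filesystem() fails, the new AIO request cache is destroyed before the inode cache is freed. The same shape, reduced to a stand-alone sketch with hypothetical step names:

/* Sketch (hypothetical steps): set up A, B, C in order and unwind in
 * reverse order when a later step fails, mirroring ovl_init() above. */
static int  init_a(void)   { return 0; }
static int  init_b(void)   { return 0; }
static int  init_c(void)   { return -1; }	/* pretend registration fails */
static void unwind_b(void) { }
static void unwind_a(void) { }

static int init_all(void)
{
	int err = init_a();

	if (err)
		return err;

	err = init_b();
	if (!err) {
		err = init_c();
		if (!err)
			return 0;
		unwind_b();
	}
	unwind_a();
	return err;
}

int main(void) { return init_all() ? 1 : 0; }
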
@@ -1766,7 +1793,7 @@ static void __exit ovl_exit(void)
         */
        rcu_barrier();
        kmem_cache_destroy(ovl_inode_cachep);
-
+       ovl_aio_request_cache_destroy();
 }
 
 module_init(ovl_init);
index f5678a3..ea00508 100644 (file)
@@ -40,18 +40,6 @@ const struct cred *ovl_override_creds(struct super_block *sb)
        return override_creds(ofs->creator_cred);
 }
 
-struct super_block *ovl_same_sb(struct super_block *sb)
-{
-       struct ovl_fs *ofs = sb->s_fs_info;
-
-       if (!ofs->numlowerfs)
-               return ofs->upper_mnt->mnt_sb;
-       else if (ofs->numlowerfs == 1 && !ofs->upper_mnt)
-               return ofs->lower_fs[0].sb;
-       else
-               return NULL;
-}
-
 /*
  * Check if underlying fs supports file handles and try to determine encoding
  * type, in order to deduce maximum inode number used by fs.
@@ -198,7 +186,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
        return oe->numlower ? oe->lowerstack[0].dentry : NULL;
 }
 
-struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
+const struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
 
@@ -576,7 +564,7 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
        err = ovl_do_setxattr(upperdentry, name, value, size, 0);
 
        if (err == -EOPNOTSUPP) {
-               pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+               pr_warn("cannot set %s xattr on upper\n", name);
                ofs->noxattr = true;
                return xerr;
        }
@@ -700,7 +688,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
 
        inode = d_inode(upperdentry);
        if (!S_ISDIR(inode->i_mode) && inode->i_nlink != 1) {
-               pr_warn_ratelimited("overlayfs: cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
+               pr_warn_ratelimited("cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
                                    upperdentry, inode->i_ino, inode->i_nlink);
                /*
                 * We either have a bug with persistent union nlink or a lower
@@ -739,7 +727,7 @@ out:
        return;
 
 fail:
-       pr_err("overlayfs: cleanup index of '%pd2' failed (%i)\n", dentry, err);
+       pr_err("cleanup index of '%pd2' failed (%i)\n", dentry, err);
        goto out;
 }
 
@@ -830,7 +818,7 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
 err_unlock:
        unlock_rename(workdir, upperdir);
 err:
-       pr_err("overlayfs: failed to lock workdir+upperdir\n");
+       pr_err("failed to lock workdir+upperdir\n");
        return -EIO;
 }
 
@@ -852,7 +840,7 @@ int ovl_check_metacopy_xattr(struct dentry *dentry)
 
        return 1;
 out:
-       pr_warn_ratelimited("overlayfs: failed to get metacopy (%i)\n", res);
+       pr_warn_ratelimited("failed to get metacopy (%i)\n", res);
        return res;
 }
 
@@ -899,7 +887,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
        return res;
 
 fail:
-       pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi)\n",
+       pr_warn_ratelimited("failed to get xattr %s: err=%zi)\n",
                            name, res);
        kfree(buf);
        return res;
@@ -931,7 +919,7 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
 
        return buf;
 invalid:
-       pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
+       pr_warn_ratelimited("invalid redirect (%s)\n", buf);
        res = -EINVAL;
        kfree(buf);
        return ERR_PTR(res);
index ead487e..bd08616 100644 (file)
@@ -33,3 +33,4 @@ proc-$(CONFIG_PROC_KCORE)     += kcore.o
 proc-$(CONFIG_PROC_VMCORE)     += vmcore.o
 proc-$(CONFIG_PRINTK)  += kmsg.o
 proc-$(CONFIG_PROC_PAGE_MONITOR)       += page.o
+proc-$(CONFIG_BOOT_CONFIG)     += bootconfig.o
diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
new file mode 100644 (file)
index 0000000..9955d75
--- /dev/null
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * /proc/bootconfig - Extra boot configuration
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/bootconfig.h>
+#include <linux/slab.h>
+
+static char *saved_boot_config;
+
+static int boot_config_proc_show(struct seq_file *m, void *v)
+{
+       if (saved_boot_config)
+               seq_puts(m, saved_boot_config);
+       return 0;
+}
+
+/* Remaining size of the buffer */
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+/* Return the needed total length if @size is 0 */
+static int __init copy_xbc_key_value_list(char *dst, size_t size)
+{
+       struct xbc_node *leaf, *vnode;
+       const char *val;
+       char *key, *end = dst + size;
+       int ret = 0;
+
+       key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
+
+       xbc_for_each_key_value(leaf, val) {
+               ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
+               if (ret < 0)
+                       break;
+               ret = snprintf(dst, rest(dst, end), "%s = ", key);
+               if (ret < 0)
+                       break;
+               dst += ret;
+               vnode = xbc_node_get_child(leaf);
+               if (vnode && xbc_node_is_array(vnode)) {
+                       xbc_array_for_each_value(vnode, val) {
+                               ret = snprintf(dst, rest(dst, end), "\"%s\"%s",
+                                       val, vnode->next ? ", " : "\n");
+                               if (ret < 0)
+                                       goto out;
+                               dst += ret;
+                       }
+               } else {
+                       ret = snprintf(dst, rest(dst, end), "\"%s\"\n", val);
+                       if (ret < 0)
+                               break;
+                       dst += ret;
+               }
+       }
+out:
+       kfree(key);
+
+       return ret < 0 ? ret : dst - (end - size);
+}
+
+static int __init proc_boot_config_init(void)
+{
+       int len;
+
+       len = copy_xbc_key_value_list(NULL, 0);
+       if (len < 0)
+               return len;
+
+       if (len > 0) {
+               saved_boot_config = kzalloc(len + 1, GFP_KERNEL);
+               if (!saved_boot_config)
+                       return -ENOMEM;
+
+               len = copy_xbc_key_value_list(saved_boot_config, len + 1);
+               if (len < 0) {
+                       kfree(saved_boot_config);
+                       return len;
+               }
+       }
+
+       proc_create_single("bootconfig", 0, NULL, boot_config_proc_show);
+
+       return 0;
+}
+fs_initcall(proc_boot_config_init);
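
proc_boot_config_init() uses a common two-pass pattern: copy_xbc_key_value_list(NULL, 0) only measures how much text the boot config expands to, then the buffer is allocated and the same function is called again to fill it. A user-space sketch of that measure-then-fill idiom built on snprintf(), which likewise reports the would-be length when handed a zero-sized buffer (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Sketch: size-then-fill, the same shape as copy_xbc_key_value_list(). */
static int render(char *dst, size_t size, const char *key, const char *val)
{
	/* snprintf(NULL, 0, ...) returns the length it would have written. */
	return snprintf(dst, size, "%s = \"%s\"\n", key, val);
}

int main(void)
{
	int len = render(NULL, 0, "ftrace.event", "sched_switch");
	char *buf;

	if (len < 0)
		return 1;
	buf = malloc(len + 1);
	if (!buf)
		return 1;
	render(buf, len + 1, "ftrace.event", "sched_switch");
	fputs(buf, stdout);
	free(buf);
	return 0;
}
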
index 96f1087..c1dea9b 100644 (file)
@@ -16,16 +16,16 @@ static int cpuinfo_open(struct inode *inode, struct file *file)
        return seq_open(file, &cpuinfo_op);
 }
 
-static const struct file_operations proc_cpuinfo_operations = {
-       .open           = cpuinfo_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops cpuinfo_proc_ops = {
+       .proc_open      = cpuinfo_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 static int __init proc_cpuinfo_init(void)
 {
-       proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations);
+       proc_create("cpuinfo", 0, NULL, &cpuinfo_proc_ops);
        return 0;
 }
 fs_initcall(proc_cpuinfo_init);
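
This hunk shows the shape of the tree-wide v5.6 conversion of /proc entries from struct file_operations to the smaller struct proc_ops: the same callbacks, renamed with a proc_ prefix (open -> proc_open, llseek -> proc_lseek, and so on), handed to proc_create(). A hedged sketch of what a new seq_file-backed entry looks like after the conversion, using a hypothetical "foo" file and the simpler single_open() helper rather than cpuinfo's seq_open():

/* Sketch (hypothetical "foo" entry): registering a /proc file with the
 * new struct proc_ops instead of struct file_operations. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, NULL);
}

static const struct proc_ops foo_proc_ops = {
	.proc_open	= foo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init foo_init(void)
{
	proc_create("foo", 0, NULL, &foo_proc_ops);
	return 0;
}
fs_initcall(foo_init);
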
index 074e958..3faed94 100644 (file)
@@ -473,7 +473,7 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
        ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
        if (ent) {
                ent->data = data;
-               ent->proc_fops = &proc_dir_operations;
+               ent->proc_dir_ops = &proc_dir_operations;
                ent->proc_iops = &proc_dir_inode_operations;
                ent = proc_register(parent, ent);
        }
@@ -503,7 +503,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
        ent = __proc_create(&parent, name, mode, 2);
        if (ent) {
                ent->data = NULL;
-               ent->proc_fops = NULL;
+               ent->proc_dir_ops = NULL;
                ent->proc_iops = NULL;
                ent = proc_register(parent, ent);
        }
@@ -533,25 +533,23 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
 
 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
                struct proc_dir_entry *parent,
-               const struct file_operations *proc_fops, void *data)
+               const struct proc_ops *proc_ops, void *data)
 {
        struct proc_dir_entry *p;
 
-       BUG_ON(proc_fops == NULL);
-
        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
-       p->proc_fops = proc_fops;
+       p->proc_ops = proc_ops;
        return proc_register(parent, p);
 }
 EXPORT_SYMBOL(proc_create_data);
  
 struct proc_dir_entry *proc_create(const char *name, umode_t mode,
                                   struct proc_dir_entry *parent,
-                                  const struct file_operations *proc_fops)
+                                  const struct proc_ops *proc_ops)
 {
-       return proc_create_data(name, mode, parent, proc_fops, NULL);
+       return proc_create_data(name, mode, parent, proc_ops, NULL);
 }
 EXPORT_SYMBOL(proc_create);
 
@@ -573,11 +571,11 @@ static int proc_seq_release(struct inode *inode, struct file *file)
        return seq_release(inode, file);
 }
 
-static const struct file_operations proc_seq_fops = {
-       .open           = proc_seq_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = proc_seq_release,
+static const struct proc_ops proc_seq_ops = {
+       .proc_open      = proc_seq_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = proc_seq_release,
 };
 
 struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
@@ -589,7 +587,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
-       p->proc_fops = &proc_seq_fops;
+       p->proc_ops = &proc_seq_ops;
        p->seq_ops = ops;
        p->state_size = state_size;
        return proc_register(parent, p);
@@ -603,11 +601,11 @@ static int proc_single_open(struct inode *inode, struct file *file)
        return single_open(file, de->single_show, de->data);
 }
 
-static const struct file_operations proc_single_fops = {
-       .open           = proc_single_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops proc_single_ops = {
+       .proc_open      = proc_single_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
@@ -619,7 +617,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
        p = proc_create_reg(name, mode, &parent, data);
        if (!p)
                return NULL;
-       p->proc_fops = &proc_single_fops;
+       p->proc_ops = &proc_single_ops;
        p->single_show = show;
        return proc_register(parent, p);
 }
index dbe43a5..6da1831 100644 (file)
@@ -163,7 +163,7 @@ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
                pdeo->closing = true;
                spin_unlock(&pde->pde_unload_lock);
                file = pdeo->file;
-               pde->proc_fops->release(file_inode(file), file);
+               pde->proc_ops->proc_release(file_inode(file), file);
                spin_lock(&pde->pde_unload_lock);
                /* After ->release. */
                list_del(&pdeo->lh);
@@ -200,12 +200,12 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
        struct proc_dir_entry *pde = PDE(file_inode(file));
        loff_t rv = -EINVAL;
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, llseek) llseek;
+               typeof_member(struct proc_ops, proc_lseek) lseek;
 
-               llseek = pde->proc_fops->llseek;
-               if (!llseek)
-                       llseek = default_llseek;
-               rv = llseek(file, offset, whence);
+               lseek = pde->proc_ops->proc_lseek;
+               if (!lseek)
+                       lseek = default_llseek;
+               rv = lseek(file, offset, whence);
                unuse_pde(pde);
        }
        return rv;
@@ -216,9 +216,9 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
        struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, read) read;
+               typeof_member(struct proc_ops, proc_read) read;
 
-               read = pde->proc_fops->read;
+               read = pde->proc_ops->proc_read;
                if (read)
                        rv = read(file, buf, count, ppos);
                unuse_pde(pde);
@@ -231,9 +231,9 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
        struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, write) write;
+               typeof_member(struct proc_ops, proc_write) write;
 
-               write = pde->proc_fops->write;
+               write = pde->proc_ops->proc_write;
                if (write)
                        rv = write(file, buf, count, ppos);
                unuse_pde(pde);
@@ -246,9 +246,9 @@ static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
        struct proc_dir_entry *pde = PDE(file_inode(file));
        __poll_t rv = DEFAULT_POLLMASK;
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, poll) poll;
+               typeof_member(struct proc_ops, proc_poll) poll;
 
-               poll = pde->proc_fops->poll;
+               poll = pde->proc_ops->proc_poll;
                if (poll)
                        rv = poll(file, pts);
                unuse_pde(pde);
@@ -261,9 +261,9 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
        struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, unlocked_ioctl) ioctl;
+               typeof_member(struct proc_ops, proc_ioctl) ioctl;
 
-               ioctl = pde->proc_fops->unlocked_ioctl;
+               ioctl = pde->proc_ops->proc_ioctl;
                if (ioctl)
                        rv = ioctl(file, cmd, arg);
                unuse_pde(pde);
@@ -277,9 +277,9 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
        struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, compat_ioctl) compat_ioctl;
+               typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
 
-               compat_ioctl = pde->proc_fops->compat_ioctl;
+               compat_ioctl = pde->proc_ops->proc_compat_ioctl;
                if (compat_ioctl)
                        rv = compat_ioctl(file, cmd, arg);
                unuse_pde(pde);
@@ -293,9 +293,9 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
        struct proc_dir_entry *pde = PDE(file_inode(file));
        int rv = -EIO;
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, mmap) mmap;
+               typeof_member(struct proc_ops, proc_mmap) mmap;
 
-               mmap = pde->proc_fops->mmap;
+               mmap = pde->proc_ops->proc_mmap;
                if (mmap)
                        rv = mmap(file, vma);
                unuse_pde(pde);
@@ -312,9 +312,9 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
        unsigned long rv = -EIO;
 
        if (use_pde(pde)) {
-               typeof_member(struct file_operations, get_unmapped_area) get_area;
+               typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;
 
-               get_area = pde->proc_fops->get_unmapped_area;
+               get_area = pde->proc_ops->proc_get_unmapped_area;
 #ifdef CONFIG_MMU
                if (!get_area)
                        get_area = current->mm->get_unmapped_area;
@@ -333,8 +333,8 @@ static int proc_reg_open(struct inode *inode, struct file *file)
 {
        struct proc_dir_entry *pde = PDE(inode);
        int rv = 0;
-       typeof_member(struct file_operations, open) open;
-       typeof_member(struct file_operations, release) release;
+       typeof_member(struct proc_ops, proc_open) open;
+       typeof_member(struct proc_ops, proc_release) release;
        struct pde_opener *pdeo;
 
        /*
@@ -351,7 +351,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
        if (!use_pde(pde))
                return -ENOENT;
 
-       release = pde->proc_fops->release;
+       release = pde->proc_ops->proc_release;
        if (release) {
                pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
                if (!pdeo) {
@@ -360,7 +360,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
                }
        }
 
-       open = pde->proc_fops->open;
+       open = pde->proc_ops->proc_open;
        if (open)
                rv = open(inode, file);
 
@@ -468,21 +468,23 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
                        inode->i_size = de->size;
                if (de->nlink)
                        set_nlink(inode, de->nlink);
-               WARN_ON(!de->proc_iops);
-               inode->i_op = de->proc_iops;
-               if (de->proc_fops) {
-                       if (S_ISREG(inode->i_mode)) {
+
+               if (S_ISREG(inode->i_mode)) {
+                       inode->i_op = de->proc_iops;
+                       inode->i_fop = &proc_reg_file_ops;
 #ifdef CONFIG_COMPAT
-                               if (!de->proc_fops->compat_ioctl)
-                                       inode->i_fop =
-                                               &proc_reg_file_ops_no_compat;
-                               else
-#endif
-                                       inode->i_fop = &proc_reg_file_ops;
-                       } else {
-                               inode->i_fop = de->proc_fops;
+                       if (!de->proc_ops->proc_compat_ioctl) {
+                               inode->i_fop = &proc_reg_file_ops_no_compat;
                        }
-               }
+#endif
+               } else if (S_ISDIR(inode->i_mode)) {
+                       inode->i_op = de->proc_iops;
+                       inode->i_fop = de->proc_dir_ops;
+               } else if (S_ISLNK(inode->i_mode)) {
+                       inode->i_op = de->proc_iops;
+                       inode->i_fop = NULL;
+               } else
+                       BUG();
        } else
               pde_put(de);
        return inode;
index 0f3b557..4158727 100644 (file)
@@ -39,7 +39,10 @@ struct proc_dir_entry {
        spinlock_t pde_unload_lock;
        struct completion *pde_unload_completion;
        const struct inode_operations *proc_iops;
-       const struct file_operations *proc_fops;
+       union {
+               const struct proc_ops *proc_ops;
+               const struct file_operations *proc_dir_ops;
+       };
        const struct dentry_operations *proc_dops;
        union {
                const struct seq_operations *seq_ops;
index e2ed8e0..8ba492d 100644 (file)
@@ -574,11 +574,11 @@ static int release_kcore(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations proc_kcore_operations = {
-       .read           = read_kcore,
-       .open           = open_kcore,
-       .release        = release_kcore,
-       .llseek         = default_llseek,
+static const struct proc_ops kcore_proc_ops = {
+       .proc_read      = read_kcore,
+       .proc_open      = open_kcore,
+       .proc_release   = release_kcore,
+       .proc_lseek     = default_llseek,
 };
 
 /* just remember that we have to update kcore */
@@ -637,8 +637,7 @@ static void __init add_modules_range(void)
 
 static int __init proc_kcore_init(void)
 {
-       proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
-                                     &proc_kcore_operations);
+       proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
        if (!proc_root_kcore) {
                pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
index 4f4a2ab..ec1b7d2 100644 (file)
@@ -49,17 +49,17 @@ static __poll_t kmsg_poll(struct file *file, poll_table *wait)
 }
 
 
-static const struct file_operations proc_kmsg_operations = {
-       .read           = kmsg_read,
-       .poll           = kmsg_poll,
-       .open           = kmsg_open,
-       .release        = kmsg_release,
-       .llseek         = generic_file_llseek,
+static const struct proc_ops kmsg_proc_ops = {
+       .proc_read      = kmsg_read,
+       .proc_poll      = kmsg_poll,
+       .proc_open      = kmsg_open,
+       .proc_release   = kmsg_release,
+       .proc_lseek     = generic_file_llseek,
 };
 
 static int __init proc_kmsg_init(void)
 {
-       proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations);
+       proc_create("kmsg", S_IRUSR, NULL, &kmsg_proc_ops);
        return 0;
 }
 fs_initcall(proc_kmsg_init);
index 7c952ee..f909243 100644 (file)
 #define KPMMASK (KPMSIZE - 1)
 #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
 
+static inline unsigned long get_max_dump_pfn(void)
+{
+#ifdef CONFIG_SPARSEMEM
+       /*
+        * The memmap of early sections is completely populated and marked
+        * online even if max_pfn does not fall on a section boundary -
+        * pfn_to_online_page() will succeed on all pages. Allow inspecting
+        * these memmaps.
+        */
+       return round_up(max_pfn, PAGES_PER_SECTION);
+#else
+       return max_pfn;
+#endif
+}
+
 /* /proc/kpagecount - an array exposing page counts
  *
  * Each entry is a u64 representing the corresponding
@@ -29,6 +44,7 @@
 static ssize_t kpagecount_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
 {
+       const unsigned long max_dump_pfn = get_max_dump_pfn();
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
@@ -37,9 +53,11 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
        u64 pcount;
 
        pfn = src / KPMSIZE;
-       count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;
+       if (src >= max_dump_pfn * KPMSIZE)
+               return 0;
+       count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
 
        while (count > 0) {
                /*
@@ -71,9 +89,9 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
        return ret;
 }
 
-static const struct file_operations proc_kpagecount_operations = {
-       .llseek = mem_lseek,
-       .read = kpagecount_read,
+static const struct proc_ops kpagecount_proc_ops = {
+       .proc_lseek     = mem_lseek,
+       .proc_read      = kpagecount_read,
 };
 
 /* /proc/kpageflags - an array exposing page flags
@@ -206,6 +224,7 @@ u64 stable_page_flags(struct page *page)
 static ssize_t kpageflags_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
 {
+       const unsigned long max_dump_pfn = get_max_dump_pfn();
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
@@ -213,9 +232,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
        ssize_t ret = 0;
 
        pfn = src / KPMSIZE;
-       count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;
+       if (src >= max_dump_pfn * KPMSIZE)
+               return 0;
+       count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
 
        while (count > 0) {
                /*
@@ -242,15 +263,16 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
        return ret;
 }
 
-static const struct file_operations proc_kpageflags_operations = {
-       .llseek = mem_lseek,
-       .read = kpageflags_read,
+static const struct proc_ops kpageflags_proc_ops = {
+       .proc_lseek     = mem_lseek,
+       .proc_read      = kpageflags_read,
 };
 
 #ifdef CONFIG_MEMCG
 static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
+       const unsigned long max_dump_pfn = get_max_dump_pfn();
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
@@ -259,9 +281,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
        u64 ino;
 
        pfn = src / KPMSIZE;
-       count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;
+       if (src >= max_dump_pfn * KPMSIZE)
+               return 0;
+       count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
 
        while (count > 0) {
                /*
@@ -293,18 +317,18 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
        return ret;
 }
 
-static const struct file_operations proc_kpagecgroup_operations = {
-       .llseek = mem_lseek,
-       .read = kpagecgroup_read,
+static const struct proc_ops kpagecgroup_proc_ops = {
+       .proc_lseek     = mem_lseek,
+       .proc_read      = kpagecgroup_read,
 };
 #endif /* CONFIG_MEMCG */
 
 static int __init proc_page_init(void)
 {
-       proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
-       proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
+       proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
+       proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
 #ifdef CONFIG_MEMCG
-       proc_create("kpagecgroup", S_IRUSR, NULL, &proc_kpagecgroup_operations);
+       proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
 #endif
        return 0;
 }
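
As context for the bounds change above, a small illustrative user-space reader (hypothetical, not from this series): each entry in /proc/kpagecount is one u64 indexed by pfn, so the file offset is pfn * 8, and with this patch a read whose offset is at or beyond max_dump_pfn * 8 returns 0 (EOF) instead of being clamped against max_pfn.

/* Hypothetical user-space example -- not part of this patch (needs root). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t count;
	int fd = open("/proc/kpagecount", O_RDONLY);

	if (fd < 0)
		return 1;
	/* One u64 per pfn: read 8 bytes at offset pfn * sizeof(u64). */
	if (pread(fd, &count, sizeof(count), pfn * sizeof(count)) != sizeof(count)) {
		/* A short read of 0 bytes now means "past max_dump_pfn" (EOF). */
		close(fd);
		return 1;
	}
	printf("pfn %lu mapcount %llu\n", pfn, (unsigned long long)count);
	close(fd);
	return 0;
}
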
index 76ae278..4888c52 100644 (file)
@@ -90,12 +90,12 @@ static int seq_release_net(struct inode *ino, struct file *f)
        return 0;
 }
 
-static const struct file_operations proc_net_seq_fops = {
-       .open           = seq_open_net,
-       .read           = seq_read,
-       .write          = proc_simple_write,
-       .llseek         = seq_lseek,
-       .release        = seq_release_net,
+static const struct proc_ops proc_net_seq_ops = {
+       .proc_open      = seq_open_net,
+       .proc_read      = seq_read,
+       .proc_write     = proc_simple_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release_net,
 };
 
 struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
@@ -108,7 +108,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
        if (!p)
                return NULL;
        pde_force_lookup(p);
-       p->proc_fops = &proc_net_seq_fops;
+       p->proc_ops = &proc_net_seq_ops;
        p->seq_ops = ops;
        p->state_size = state_size;
        return proc_register(parent, p);
@@ -152,7 +152,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
        if (!p)
                return NULL;
        pde_force_lookup(p);
-       p->proc_fops = &proc_net_seq_fops;
+       p->proc_ops = &proc_net_seq_ops;
        p->seq_ops = ops;
        p->state_size = state_size;
        p->write = write;
@@ -183,12 +183,12 @@ static int single_release_net(struct inode *ino, struct file *f)
        return single_release(ino, f);
 }
 
-static const struct file_operations proc_net_single_fops = {
-       .open           = single_open_net,
-       .read           = seq_read,
-       .write          = proc_simple_write,
-       .llseek         = seq_lseek,
-       .release        = single_release_net,
+static const struct proc_ops proc_net_single_ops = {
+       .proc_open      = single_open_net,
+       .proc_read      = seq_read,
+       .proc_write     = proc_simple_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release_net,
 };
 
 struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
@@ -201,7 +201,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
        if (!p)
                return NULL;
        pde_force_lookup(p);
-       p->proc_fops = &proc_net_single_fops;
+       p->proc_ops = &proc_net_single_ops;
        p->single_show = show;
        return proc_register(parent, p);
 }
@@ -244,7 +244,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
        if (!p)
                return NULL;
        pde_force_lookup(p);
-       p->proc_fops = &proc_net_single_fops;
+       p->proc_ops = &proc_net_single_ops;
        p->single_show = show;
        p->write = write;
        return proc_register(parent, p);
index d80989b..c75bb46 100644 (file)
@@ -1720,7 +1720,7 @@ int __init proc_sys_init(void)
 
        proc_sys_root = proc_mkdir("sys", NULL);
        proc_sys_root->proc_iops = &proc_sys_dir_operations;
-       proc_sys_root->proc_fops = &proc_sys_dir_file_operations;
+       proc_sys_root->proc_dir_ops = &proc_sys_dir_file_operations;
        proc_sys_root->nlink = 0;
 
        return sysctl_init();
index 0b7c8df..72c07a3 100644 (file)
@@ -292,7 +292,7 @@ struct proc_dir_entry proc_root = {
        .nlink          = 2, 
        .refcnt         = REFCOUNT_INIT(1),
        .proc_iops      = &proc_root_inode_operations, 
-       .proc_fops      = &proc_root_operations,
+       .proc_dir_ops   = &proc_root_operations,
        .parent         = &proc_root,
        .subdir         = RB_ROOT,
        .name           = "/proc",
index fd931d3..0449edf 100644 (file)
@@ -223,16 +223,16 @@ static int stat_open(struct inode *inode, struct file *file)
        return single_open_size(file, show_stat, NULL, size);
 }
 
-static const struct file_operations proc_stat_operations = {
-       .open           = stat_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops stat_proc_ops = {
+       .proc_open      = stat_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 static int __init proc_stat_init(void)
 {
-       proc_create("stat", 0, NULL, &proc_stat_operations);
+       proc_create("stat", 0, NULL, &stat_proc_ops);
        return 0;
 }
 fs_initcall(proc_stat_init);
index 9442631..3ba9ae8 100644 (file)
@@ -505,7 +505,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 
 #ifdef CONFIG_SHMEM
 static int smaps_pte_hole(unsigned long addr, unsigned long end,
-               struct mm_walk *walk)
+                         __always_unused int depth, struct mm_walk *walk)
 {
        struct mem_size_stats *mss = walk->private;
 
@@ -1282,7 +1282,7 @@ static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
 }
 
 static int pagemap_pte_hole(unsigned long start, unsigned long end,
-                               struct mm_walk *walk)
+                           __always_unused int depth, struct mm_walk *walk)
 {
        struct pagemapread *pm = walk->private;
        unsigned long addr = start;
index 7b13988..7dc800c 100644 (file)
@@ -667,10 +667,10 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 }
 #endif
 
-static const struct file_operations proc_vmcore_operations = {
-       .read           = read_vmcore,
-       .llseek         = default_llseek,
-       .mmap           = mmap_vmcore,
+static const struct proc_ops vmcore_proc_ops = {
+       .proc_read      = read_vmcore,
+       .proc_lseek     = default_llseek,
+       .proc_mmap      = mmap_vmcore,
 };
 
 static struct vmcore* __init get_new_element(void)
@@ -1555,7 +1555,7 @@ static int __init vmcore_init(void)
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;
 
-       proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
+       proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
index 7458fcc..59d819c 100644 (file)
@@ -939,6 +939,34 @@ out:
        return ret;
 }
 
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+                          struct iov_iter *iter)
+{
+       size_t tot_len;
+       ssize_t ret = 0;
+
+       if (!file->f_op->read_iter)
+               return -EINVAL;
+       if (!(file->f_mode & FMODE_READ))
+               return -EBADF;
+       if (!(file->f_mode & FMODE_CAN_READ))
+               return -EINVAL;
+
+       tot_len = iov_iter_count(iter);
+       if (!tot_len)
+               goto out;
+       ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
+       if (ret < 0)
+               return ret;
+
+       ret = call_read_iter(file, iocb, iter);
+out:
+       if (ret >= 0)
+               fsnotify_access(file);
+       return ret;
+}
+EXPORT_SYMBOL(vfs_iocb_iter_read);
+
 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
                rwf_t flags)
 {
@@ -975,6 +1003,34 @@ static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
        return ret;
 }
 
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+                           struct iov_iter *iter)
+{
+       size_t tot_len;
+       ssize_t ret = 0;
+
+       if (!file->f_op->write_iter)
+               return -EINVAL;
+       if (!(file->f_mode & FMODE_WRITE))
+               return -EBADF;
+       if (!(file->f_mode & FMODE_CAN_WRITE))
+               return -EINVAL;
+
+       tot_len = iov_iter_count(iter);
+       if (!tot_len)
+               return 0;
+       ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
+       if (ret < 0)
+               return ret;
+
+       ret = call_write_iter(file, iocb, iter);
+       if (ret > 0)
+               fsnotify_modify(file);
+
+       return ret;
+}
+EXPORT_SYMBOL(vfs_iocb_iter_write);
+
 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
                rwf_t flags)
 {
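
A minimal sketch of how an in-kernel caller might drive the new vfs_iocb_iter_read() helper added above. This is not part of the patch; demo_kernel_read() and its buffer/length arguments are invented for illustration, and it assumes the usual fs.h/uio.h primitives (init_sync_kiocb(), iov_iter_kvec()):

/* Hypothetical in-kernel caller -- not part of this patch. */
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t demo_kernel_read(struct file *file, void *buf, size_t len,
				loff_t pos)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;
	struct kiocb kiocb;

	/* Synchronous kiocb: no completion callback, position is explicit. */
	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos;

	/* READ direction: data flows from the file into the kvec. */
	iov_iter_kvec(&iter, READ, &kv, 1, len);

	/* Returns bytes read or a negative errno, as in the helper above. */
	return vfs_iocb_iter_read(file, &kiocb, &iter);
}
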
index d41c21f..c4ab045 100644 (file)
@@ -449,7 +449,7 @@ int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
        }
 
        link = kernfs_create_link(kobj->sd, target_name, entry);
-       if (IS_ERR(link) && PTR_ERR(link) == -EEXIST)
+       if (PTR_ERR(link) == -EEXIST)
                sysfs_warn_dup(kobj->sd, target_name);
 
        kernfs_put(entry);
index 0caa151..0ee8c6d 100644 (file)
@@ -330,7 +330,10 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
                parent = tracefs_mount->mnt_root;
 
        inode_lock(parent->d_inode);
-       dentry = lookup_one_len(name, parent, strlen(name));
+       if (unlikely(IS_DEADDIR(parent->d_inode)))
+               dentry = ERR_PTR(-ENOENT);
+       else
+               dentry = lookup_one_len(name, parent, strlen(name));
        if (!IS_ERR(dentry) && dentry->d_inode) {
                dput(dentry);
                dentry = ERR_PTR(-EEXIST);
@@ -499,122 +502,27 @@ __init struct dentry *tracefs_create_instance_dir(const char *name,
        return dentry;
 }
 
-static int __tracefs_remove(struct dentry *dentry, struct dentry *parent)
+static void remove_one(struct dentry *victim)
 {
-       int ret = 0;
-
-       if (simple_positive(dentry)) {
-               if (dentry->d_inode) {
-                       dget(dentry);
-                       switch (dentry->d_inode->i_mode & S_IFMT) {
-                       case S_IFDIR:
-                               ret = simple_rmdir(parent->d_inode, dentry);
-                               if (!ret)
-                                       fsnotify_rmdir(parent->d_inode, dentry);
-                               break;
-                       default:
-                               simple_unlink(parent->d_inode, dentry);
-                               fsnotify_unlink(parent->d_inode, dentry);
-                               break;
-                       }
-                       if (!ret)
-                               d_delete(dentry);
-                       dput(dentry);
-               }
-       }
-       return ret;
-}
-
-/**
- * tracefs_remove - removes a file or directory from the tracefs filesystem
- * @dentry: a pointer to a the dentry of the file or directory to be
- *          removed.
- *
- * This function removes a file or directory in tracefs that was previously
- * created with a call to another tracefs function (like
- * tracefs_create_file() or variants thereof.)
- */
-void tracefs_remove(struct dentry *dentry)
-{
-       struct dentry *parent;
-       int ret;
-
-       if (IS_ERR_OR_NULL(dentry))
-               return;
-
-       parent = dentry->d_parent;
-       inode_lock(parent->d_inode);
-       ret = __tracefs_remove(dentry, parent);
-       inode_unlock(parent->d_inode);
-       if (!ret)
-               simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+       simple_release_fs(&tracefs_mount, &tracefs_mount_count);
 }
 
 /**
- * tracefs_remove_recursive - recursively removes a directory
+ * tracefs_remove - recursively removes a directory
  * @dentry: a pointer to a the dentry of the directory to be removed.
  *
  * This function recursively removes a directory tree in tracefs that
  * was previously created with a call to another tracefs function
  * (like tracefs_create_file() or variants thereof.)
  */
-void tracefs_remove_recursive(struct dentry *dentry)
+void tracefs_remove(struct dentry *dentry)
 {
-       struct dentry *child, *parent;
-
        if (IS_ERR_OR_NULL(dentry))
                return;
 
-       parent = dentry;
- down:
-       inode_lock(parent->d_inode);
- loop:
-       /*
-        * The parent->d_subdirs is protected by the d_lock. Outside that
-        * lock, the child can be unlinked and set to be freed which can
-        * use the d_u.d_child as the rcu head and corrupt this list.
-        */
-       spin_lock(&parent->d_lock);
-       list_for_each_entry(child, &parent->d_subdirs, d_child) {
-               if (!simple_positive(child))
-                       continue;
-
-               /* perhaps simple_empty(child) makes more sense */
-               if (!list_empty(&child->d_subdirs)) {
-                       spin_unlock(&parent->d_lock);
-                       inode_unlock(parent->d_inode);
-                       parent = child;
-                       goto down;
-               }
-
-               spin_unlock(&parent->d_lock);
-
-               if (!__tracefs_remove(child, parent))
-                       simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-
-               /*
-                * The parent->d_lock protects agaist child from unlinking
-                * from d_subdirs. When releasing the parent->d_lock we can
-                * no longer trust that the next pointer is valid.
-                * Restart the loop. We'll skip this one with the
-                * simple_positive() check.
-                */
-               goto loop;
-       }
-       spin_unlock(&parent->d_lock);
-
-       inode_unlock(parent->d_inode);
-       child = parent;
-       parent = parent->d_parent;
-       inode_lock(parent->d_inode);
-
-       if (child != dentry)
-               /* go up */
-               goto loop;
-
-       if (!__tracefs_remove(child, parent))
-               simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-       inode_unlock(parent->d_inode);
+       simple_pin_fs(&trace_fs_type, &tracefs_mount, &tracefs_mount_count);
+       simple_recursive_removal(dentry, remove_one);
+       simple_release_fs(&tracefs_mount, &tracefs_mount_count);
 }
 
 /**
index bc4dec5..743928e 100644 (file)
@@ -1080,18 +1080,12 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
                inode->i_uid = attr->ia_uid;
        if (attr->ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
-       if (attr->ia_valid & ATTR_ATIME) {
-               inode->i_atime = timestamp_truncate(attr->ia_atime,
-                                                 inode);
-       }
-       if (attr->ia_valid & ATTR_MTIME) {
-               inode->i_mtime = timestamp_truncate(attr->ia_mtime,
-                                                 inode);
-       }
-       if (attr->ia_valid & ATTR_CTIME) {
-               inode->i_ctime = timestamp_truncate(attr->ia_ctime,
-                                                 inode);
-       }
+       if (attr->ia_valid & ATTR_ATIME)
+               inode->i_atime = attr->ia_atime;
+       if (attr->ia_valid & ATTR_MTIME)
+               inode->i_mtime = attr->ia_mtime;
+       if (attr->ia_valid & ATTR_CTIME)
+               inode->i_ctime = attr->ia_ctime;
        if (attr->ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;
 
index 17c90df..4b4b65b 100644 (file)
@@ -84,7 +84,6 @@ static int create_default_filesystem(struct ubifs_info *c)
        int idx_node_size;
        long long tmp64, main_bytes;
        __le64 tmp_le64;
-       __le32 tmp_le32;
        struct timespec64 ts;
        u8 hash[UBIFS_HASH_ARR_SZ];
        u8 hash_lpt[UBIFS_HASH_ARR_SZ];
@@ -291,16 +290,14 @@ static int create_default_filesystem(struct ubifs_info *c)
        ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
        ino->nlink = cpu_to_le32(2);
 
-       ktime_get_real_ts64(&ts);
-       ts = timespec64_trunc(ts, DEFAULT_TIME_GRAN);
+       ktime_get_coarse_real_ts64(&ts);
        tmp_le64 = cpu_to_le64(ts.tv_sec);
        ino->atime_sec   = tmp_le64;
        ino->ctime_sec   = tmp_le64;
        ino->mtime_sec   = tmp_le64;
-       tmp_le32 = cpu_to_le32(ts.tv_nsec);
-       ino->atime_nsec  = tmp_le32;
-       ino->ctime_nsec  = tmp_le32;
-       ino->mtime_nsec  = tmp_le32;
+       ino->atime_nsec  = 0;
+       ino->ctime_nsec  = 0;
+       ino->mtime_nsec  = 0;
        ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
        ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);
 
index c952b6b..1d17ce9 100644 (file)
@@ -36,14 +36,14 @@ static int utimes_common(const struct path *path, struct timespec64 *times)
                if (times[0].tv_nsec == UTIME_OMIT)
                        newattrs.ia_valid &= ~ATTR_ATIME;
                else if (times[0].tv_nsec != UTIME_NOW) {
-                       newattrs.ia_atime = timestamp_truncate(times[0], inode);
+                       newattrs.ia_atime = times[0];
                        newattrs.ia_valid |= ATTR_ATIME_SET;
                }
 
                if (times[1].tv_nsec == UTIME_OMIT)
                        newattrs.ia_valid &= ~ATTR_MTIME;
                else if (times[1].tv_nsec != UTIME_NOW) {
-                       newattrs.ia_mtime = timestamp_truncate(times[1], inode);
+                       newattrs.ia_mtime = times[1];
                        newattrs.ia_valid |= ATTR_MTIME_SET;
                }
                /*
index 14fbdf2..08d6beb 100644 (file)
 #include "xfs_ag_resv.h"
 #include "xfs_health.h"
 
-static struct xfs_buf *
+static int
 xfs_get_aghdr_buf(
        struct xfs_mount        *mp,
        xfs_daddr_t             blkno,
        size_t                  numblks,
+       struct xfs_buf          **bpp,
        const struct xfs_buf_ops *ops)
 {
        struct xfs_buf          *bp;
+       int                     error;
 
-       bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
-       if (!bp)
-               return NULL;
+       error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
+       if (error)
+               return error;
 
        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        bp->b_bn = blkno;
        bp->b_maps[0].bm_bn = blkno;
        bp->b_ops = ops;
 
-       return bp;
+       *bpp = bp;
+       return 0;
 }
 
 static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
@@ -340,13 +343,13 @@ xfs_ag_init_hdr(
        struct aghdr_init_data  *id,
        aghdr_init_work_f       work,
        const struct xfs_buf_ops *ops)
-
 {
        struct xfs_buf          *bp;
+       int                     error;
 
-       bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
-       if (!bp)
-               return -ENOMEM;
+       error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
+       if (error)
+               return error;
 
        (*work)(mp, bp, id);
 
index fc93fd8..d8053bc 100644 (file)
@@ -1070,11 +1070,11 @@ xfs_alloc_ag_vextent_small(
        if (args->datatype & XFS_ALLOC_USERDATA) {
                struct xfs_buf  *bp;
 
-               bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
-               if (XFS_IS_CORRUPT(args->mp, !bp)) {
-                       error = -EFSCORRUPTED;
+               error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
+                               XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
+                               args->mp->m_bsize, 0, &bp);
+               if (error)
                        goto error;
-               }
                xfs_trans_binval(args->tp, bp);
        }
        *fbnop = args->agbno = fbno;
@@ -2347,9 +2347,11 @@ xfs_free_agfl_block(
        if (error)
                return error;
 
-       bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
-       if (XFS_IS_CORRUPT(tp->t_mountp, !bp))
-               return -EFSCORRUPTED;
+       error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
+                       XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
+                       tp->t_mountp->m_bsize, 0, &bp);
+       if (error)
+               return error;
        xfs_trans_binval(tp, bp);
 
        return 0;
@@ -2500,12 +2502,11 @@ xfs_alloc_fix_freelist(
 
        if (!pag->pagf_init) {
                error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
-               if (error)
+               if (error) {
+                       /* Couldn't lock the AGF so skip this AG. */
+                       if (error == -EAGAIN)
+                               error = 0;
                        goto out_no_agbp;
-               if (!pag->pagf_init) {
-                       ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
-                       ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
-                       goto out_agbp_relse;
                }
        }
 
@@ -2531,11 +2532,10 @@ xfs_alloc_fix_freelist(
         */
        if (!agbp) {
                error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
-               if (error)
-                       goto out_no_agbp;
-               if (!agbp) {
-                       ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
-                       ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
+               if (error) {
+                       /* Couldn't lock the AGF so skip this AG. */
+                       if (error == -EAGAIN)
+                               error = 0;
                        goto out_no_agbp;
                }
        }
@@ -2766,11 +2766,10 @@ xfs_alloc_pagf_init(
        xfs_buf_t               *bp;
        int                     error;
 
-       if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
-               return error;
-       if (bp)
+       error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp);
+       if (!error)
                xfs_trans_brelse(tp, bp);
-       return 0;
+       return error;
 }
 
 /*
@@ -2956,14 +2955,11 @@ xfs_read_agf(
        trace_xfs_read_agf(mp, agno);
 
        ASSERT(agno != NULLAGNUMBER);
-       error = xfs_trans_read_buf(
-                       mp, tp, mp->m_ddev_targp,
+       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
                        XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
                        XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
        if (error)
                return error;
-       if (!*bpp)
-               return 0;
 
        ASSERT(!(*bpp)->b_error);
        xfs_buf_set_ref(*bpp, XFS_AGF_REF);
@@ -2987,14 +2983,15 @@ xfs_alloc_read_agf(
 
        trace_xfs_alloc_read_agf(mp, agno);
 
+       /* We don't support trylock when freeing. */
+       ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
+                       (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
        ASSERT(agno != NULLAGNUMBER);
        error = xfs_read_agf(mp, tp, agno,
                        (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
                        bpp);
        if (error)
                return error;
-       if (!*bpp)
-               return 0;
        ASSERT(!(*bpp)->b_error);
 
        agf = XFS_BUF_TO_AGF(*bpp);
index a266d05..8b7f74b 100644 (file)
@@ -418,20 +418,10 @@ xfs_attr_rmtval_get(
                               (map[i].br_startblock != HOLESTARTBLOCK));
                        dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
                        dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
-                       bp = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt, 0,
-                                       &xfs_attr3_rmt_buf_ops);
-                       if (!bp)
-                               return -ENOMEM;
-                       error = bp->b_error;
-                       if (error) {
-                               xfs_buf_ioerror_alert(bp, __func__);
-                               xfs_buf_relse(bp);
-
-                               /* bad CRC means corrupted metadata */
-                               if (error == -EFSBADCRC)
-                                       error = -EFSCORRUPTED;
+                       error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
+                                       0, &bp, &xfs_attr3_rmt_buf_ops);
+                       if (error)
                                return error;
-                       }
 
                        error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
                                                        &offset, &valuelen,
@@ -555,9 +545,9 @@ xfs_attr_rmtval_set(
                dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
                dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
-               bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt);
-               if (!bp)
-                       return -ENOMEM;
+               error = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, &bp);
+               if (error)
+                       return error;
                bp->b_ops = &xfs_attr3_rmt_buf_ops;
 
                xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
index 4c2e046..9a6d7a8 100644 (file)
@@ -730,11 +730,11 @@ xfs_bmap_extents_to_btree(
        cur->bc_private.b.allocated++;
        ip->i_d.di_nblocks++;
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
-       abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
-       if (XFS_IS_CORRUPT(mp, !abp)) {
-               error = -EFSCORRUPTED;
+       error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+                       XFS_FSB_TO_DADDR(mp, args.fsbno),
+                       mp->m_bsize, 0, &abp);
+       if (error)
                goto out_unreserve_dquot;
-       }
 
        /*
         * Fill in the child block.
@@ -878,7 +878,11 @@ xfs_bmap_local_to_extents(
        ASSERT(args.fsbno != NULLFSBLOCK);
        ASSERT(args.len == 1);
        tp->t_firstblock = args.fsbno;
-       bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno);
+       error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
+                       XFS_FSB_TO_DADDR(args.mp, args.fsbno),
+                       args.mp->m_bsize, 0, &bp);
+       if (error)
+               goto done;
 
        /*
         * Initialize the block, copy the data and log the remote buffer.
@@ -3307,11 +3311,12 @@ xfs_bmap_longest_free_extent(
        pag = xfs_perag_get(mp, ag);
        if (!pag->pagf_init) {
                error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
-               if (error)
-                       goto out;
-
-               if (!pag->pagf_init) {
-                       *notinit = 1;
+               if (error) {
+                       /* Couldn't lock the AGF, so skip this AG. */
+                       if (error == -EAGAIN) {
+                               *notinit = 1;
+                               error = 0;
+                       }
                        goto out;
                }
        }
index b22c7e9..fd300dc 100644 (file)
@@ -678,42 +678,6 @@ xfs_btree_get_block(
        return XFS_BUF_TO_BLOCK(*bpp);
 }
 
-/*
- * Get a buffer for the block, return it with no data read.
- * Long-form addressing.
- */
-xfs_buf_t *                            /* buffer for fsbno */
-xfs_btree_get_bufl(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_fsblock_t   fsbno)          /* file system block number */
-{
-       xfs_daddr_t             d;              /* real disk block address */
-
-       ASSERT(fsbno != NULLFSBLOCK);
-       d = XFS_FSB_TO_DADDR(mp, fsbno);
-       return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0);
-}
-
-/*
- * Get a buffer for the block, return it with no data read.
- * Short-form addressing.
- */
-xfs_buf_t *                            /* buffer for agno/agbno */
-xfs_btree_get_bufs(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_agnumber_t  agno,           /* allocation group number */
-       xfs_agblock_t   agbno)          /* allocation group block number */
-{
-       xfs_daddr_t             d;              /* real disk block address */
-
-       ASSERT(agno != NULLAGNUMBER);
-       ASSERT(agbno != NULLAGBLOCK);
-       d = XFS_AGB_TO_DADDR(mp, agno, agbno);
-       return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0);
-}
-
 /*
  * Change the cursor to point to the first record at the given level.
  * Other levels are unaffected.
@@ -1270,11 +1234,10 @@ xfs_btree_get_buf_block(
        error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
        if (error)
                return error;
-       *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
-                                mp->m_bsize, 0);
-
-       if (!*bpp)
-               return -ENOMEM;
+       error = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, mp->m_bsize,
+                       0, bpp);
+       if (error)
+               return error;
 
        (*bpp)->b_ops = cur->bc_ops->buf_ops;
        *block = XFS_BUF_TO_BLOCK(*bpp);
index fb9b212..3eff7c3 100644 (file)
@@ -296,27 +296,6 @@ xfs_btree_dup_cursor(
        xfs_btree_cur_t         *cur,   /* input cursor */
        xfs_btree_cur_t         **ncur);/* output cursor */
 
-/*
- * Get a buffer for the block, return it with no data read.
- * Long-form addressing.
- */
-struct xfs_buf *                               /* buffer for fsbno */
-xfs_btree_get_bufl(
-       struct xfs_mount        *mp,    /* file system mount point */
-       struct xfs_trans        *tp,    /* transaction pointer */
-       xfs_fsblock_t           fsbno); /* file system block number */
-
-/*
- * Get a buffer for the block, return it with no data read.
- * Short-form addressing.
- */
-struct xfs_buf *                               /* buffer for agno/agbno */
-xfs_btree_get_bufs(
-       struct xfs_mount        *mp,    /* file system mount point */
-       struct xfs_trans        *tp,    /* transaction pointer */
-       xfs_agnumber_t          agno,   /* allocation group number */
-       xfs_agblock_t           agbno); /* allocation group block number */
-
 /*
  * Compute first and last byte offsets for the fields given.
  * Interprets the offsets table, which contains struct field offsets.
index 8c3eafe..875e04f 100644 (file)
@@ -2591,13 +2591,9 @@ xfs_da_get_buf(
        if (error || nmap == 0)
                goto out_free;
 
-       bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0);
-       error = bp ? bp->b_error : -EIO;
-       if (error) {
-               if (bp)
-                       xfs_trans_brelse(tp, bp);
+       error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
+       if (error)
                goto out_free;
-       }
 
        *bpp = bp;
 
index 5b759af..bf161e9 100644 (file)
@@ -276,6 +276,7 @@ xfs_ialloc_inode_init(
        int                     i, j;
        xfs_daddr_t             d;
        xfs_ino_t               ino = 0;
+       int                     error;
 
        /*
         * Loop over the new block(s), filling in the inodes.  For small block
@@ -327,12 +328,11 @@ xfs_ialloc_inode_init(
                 */
                d = XFS_AGB_TO_DADDR(mp, agno, agbno +
                                (j * M_IGEO(mp)->blocks_per_cluster));
-               fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-                                        mp->m_bsize *
-                                        M_IGEO(mp)->blocks_per_cluster,
-                                        XBF_UNMAPPED);
-               if (!fbuf)
-                       return -ENOMEM;
+               error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+                               mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
+                               XBF_UNMAPPED, &fbuf);
+               if (error)
+                       return error;
 
                /* Initialize the inode buffers and log them appropriately. */
                fbuf->b_ops = &xfs_inode_buf_ops;
index d7d702e..6e1665f 100644 (file)
@@ -1177,8 +1177,6 @@ xfs_refcount_finish_one(
                                XFS_ALLOC_FLAG_FREEING, &agbp);
                if (error)
                        return error;
-               if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
-                       return -EFSCORRUPTED;
 
                rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
                if (!rcur) {
@@ -1718,10 +1716,6 @@ xfs_refcount_recover_cow_leftovers(
        error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
        if (error)
                goto out_trans;
-       if (!agbp) {
-               error = -ENOMEM;
-               goto out_trans;
-       }
        cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
 
        /* Find all the leftover CoW staging extents. */
index 0ac6975..2f60fc3 100644 (file)
@@ -985,9 +985,9 @@ xfs_update_secondary_sbs(
        for (agno = 1; agno < mp->m_sb.sb_agcount; agno++) {
                struct xfs_buf          *bp;
 
-               bp = xfs_buf_get(mp->m_ddev_targp,
+               error = xfs_buf_get(mp->m_ddev_targp,
                                 XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
-                                XFS_FSS_TO_BB(mp, 1));
+                                XFS_FSS_TO_BB(mp, 1), &bp);
                /*
                 * If we get an error reading or writing alternate superblocks,
                 * continue.  xfs_repair chooses the "best" superblock based
@@ -995,12 +995,12 @@ xfs_update_secondary_sbs(
                 * superblocks un-updated than updated, and xfs_repair may
                 * pick them over the properly-updated primary.
                 */
-               if (!bp) {
+               if (error) {
                        xfs_warn(mp,
                "error allocating secondary superblock for ag %d",
                                agno);
                        if (!saved_error)
-                               saved_error = -ENOMEM;
+                               saved_error = error;
                        continue;
                }
 
@@ -1185,13 +1185,14 @@ xfs_sb_get_secondary(
        struct xfs_buf          **bpp)
 {
        struct xfs_buf          *bp;
+       int                     error;
 
        ASSERT(agno != 0 && agno != NULLAGNUMBER);
-       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+       error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
                        XFS_AG_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
-                       XFS_FSS_TO_BB(mp, 1), 0);
-       if (!bp)
-               return -ENOMEM;
+                       XFS_FSS_TO_BB(mp, 1), 0, &bp);
+       if (error)
+               return error;
        bp->b_ops = &xfs_sb_buf_ops;
        xfs_buf_oneshot(bp);
        *bpp = bp;
index 7a1a38b..d5e6db9 100644 (file)
@@ -659,8 +659,6 @@ xrep_agfl(
        error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
        if (error)
                return error;
-       if (!agf_bp)
-               return -ENOMEM;
 
        /*
         * Make sure we have the AGFL buffer, as scrub might have decided it
@@ -735,8 +733,6 @@ xrep_agi_find_btrees(
        error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
        if (error)
                return error;
-       if (!agf_bp)
-               return -ENOMEM;
 
        /* Find the btree roots. */
        error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
index 7251c66..ec2064e 100644 (file)
@@ -83,9 +83,6 @@ xchk_fscount_warmup(
                error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
                if (error)
                        break;
-               error = -ENOMEM;
-               if (!agf_bp || !agi_bp)
-                       break;
 
                /*
                 * These are supposed to be initialized by the header read
index b70a88b..e489d7a 100644 (file)
@@ -341,13 +341,17 @@ xrep_init_btblock(
        struct xfs_trans                *tp = sc->tp;
        struct xfs_mount                *mp = sc->mp;
        struct xfs_buf                  *bp;
+       int                             error;
 
        trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
                        XFS_FSB_TO_AGBNO(mp, fsb), btnum);
 
        ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
-       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsb),
-                       XFS_FSB_TO_BB(mp, 1), 0);
+       error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+                       XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0,
+                       &bp);
+       if (error)
+               return error;
        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno);
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
@@ -542,8 +546,6 @@ xrep_reap_block(
                error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp);
                if (error)
                        return error;
-               if (!agf_bp)
-                       return -ENOMEM;
        } else {
                agf_bp = sc->sa.agf_bp;
        }
index 8fbb841..bbfa6ba 100644 (file)
@@ -205,11 +205,12 @@ xfs_attr3_node_inactive(
                /*
                 * Remove the subsidiary block from the cache and from the log.
                 */
-               child_bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
+               error = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
                                child_blkno,
-                               XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
-               if (!child_bp)
-                       return -EIO;
+                               XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0,
+                               &child_bp);
+               if (error)
+                       return error;
                error = bp->b_error;
                if (error) {
                        xfs_trans_brelse(*trans, child_bp);
@@ -298,10 +299,10 @@ xfs_attr3_root_inactive(
        /*
         * Invalidate the incore copy of the root block.
         */
-       bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
-                       XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
-       if (!bp)
-               return -EIO;
+       error = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
+                       XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
+       if (error)
+               return error;
        error = bp->b_error;
        if (error) {
                xfs_trans_brelse(*trans, bp);
index a0229c3..217e4f8 100644 (file)
@@ -198,20 +198,22 @@ xfs_buf_free_maps(
        }
 }
 
-static struct xfs_buf *
+static int
 _xfs_buf_alloc(
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
-       xfs_buf_flags_t         flags)
+       xfs_buf_flags_t         flags,
+       struct xfs_buf          **bpp)
 {
        struct xfs_buf          *bp;
        int                     error;
        int                     i;
 
+       *bpp = NULL;
        bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
        if (unlikely(!bp))
-               return NULL;
+               return -ENOMEM;
 
        /*
         * We don't want certain flags to appear in b_flags unless they are
@@ -239,7 +241,7 @@ _xfs_buf_alloc(
        error = xfs_buf_get_maps(bp, nmaps);
        if (error)  {
                kmem_cache_free(xfs_buf_zone, bp);
-               return NULL;
+               return error;
        }
 
        bp->b_bn = map[0].bm_bn;
@@ -256,7 +258,8 @@ _xfs_buf_alloc(
        XFS_STATS_INC(bp->b_mount, xb_create);
        trace_xfs_buf_init(bp, _RET_IP_);
 
-       return bp;
+       *bpp = bp;
+       return 0;
 }
 
 /*
@@ -682,53 +685,39 @@ xfs_buf_incore(
  * cache hits, as metadata intensive workloads will see 3 orders of magnitude
  * more hits than misses.
  */
-struct xfs_buf *
+int
 xfs_buf_get_map(
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
-       xfs_buf_flags_t         flags)
+       xfs_buf_flags_t         flags,
+       struct xfs_buf          **bpp)
 {
        struct xfs_buf          *bp;
        struct xfs_buf          *new_bp;
        int                     error = 0;
 
+       *bpp = NULL;
        error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-
-       switch (error) {
-       case 0:
-               /* cache hit */
+       if (!error)
                goto found;
-       case -EAGAIN:
-               /* cache hit, trylock failure, caller handles failure */
-               ASSERT(flags & XBF_TRYLOCK);
-               return NULL;
-       case -ENOENT:
-               /* cache miss, go for insert */
-               break;
-       case -EFSCORRUPTED:
-       default:
-               /*
-                * None of the higher layers understand failure types
-                * yet, so return NULL to signal a fatal lookup error.
-                */
-               return NULL;
-       }
+       if (error != -ENOENT)
+               return error;
 
-       new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
-       if (unlikely(!new_bp))
-               return NULL;
+       error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
+       if (error)
+               return error;
 
        error = xfs_buf_allocate_memory(new_bp, flags);
        if (error) {
                xfs_buf_free(new_bp);
-               return NULL;
+               return error;
        }
 
        error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
        if (error) {
                xfs_buf_free(new_bp);
-               return NULL;
+               return error;
        }
 
        if (bp != new_bp)
@@ -741,7 +730,7 @@ found:
                        xfs_warn(target->bt_mount,
                                "%s: failed to map pagesn", __func__);
                        xfs_buf_relse(bp);
-                       return NULL;
+                       return error;
                }
        }
 
@@ -754,7 +743,8 @@ found:
 
        XFS_STATS_INC(target->bt_mount, xb_get);
        trace_xfs_buf_get(bp, flags, _RET_IP_);
-       return bp;
+       *bpp = bp;
+       return 0;
 }
 
 STATIC int
@@ -806,46 +796,77 @@ xfs_buf_reverify(
        return bp->b_error;
 }
 
-xfs_buf_t *
+int
 xfs_buf_read_map(
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags,
-       const struct xfs_buf_ops *ops)
+       struct xfs_buf          **bpp,
+       const struct xfs_buf_ops *ops,
+       xfs_failaddr_t          fa)
 {
        struct xfs_buf          *bp;
+       int                     error;
 
        flags |= XBF_READ;
+       *bpp = NULL;
 
-       bp = xfs_buf_get_map(target, map, nmaps, flags);
-       if (!bp)
-               return NULL;
+       error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+       if (error)
+               return error;
 
        trace_xfs_buf_read(bp, flags, _RET_IP_);
 
        if (!(bp->b_flags & XBF_DONE)) {
+               /* Initiate the buffer read and wait. */
                XFS_STATS_INC(target->bt_mount, xb_get_read);
                bp->b_ops = ops;
-               _xfs_buf_read(bp, flags);
-               return bp;
+               error = _xfs_buf_read(bp, flags);
+
+               /* Readahead iodone already dropped the buffer, so exit. */
+               if (flags & XBF_ASYNC)
+                       return 0;
+       } else {
+               /* Buffer already read; all we need to do is check it. */
+               error = xfs_buf_reverify(bp, ops);
+
+               /* Readahead already finished; drop the buffer and exit. */
+               if (flags & XBF_ASYNC) {
+                       xfs_buf_relse(bp);
+                       return 0;
+               }
+
+               /* We do not want read in the flags */
+               bp->b_flags &= ~XBF_READ;
+               ASSERT(bp->b_ops != NULL || ops == NULL);
        }
 
-       xfs_buf_reverify(bp, ops);
+       /*
+        * If we've had a read error, then the contents of the buffer are
+        * invalid and should not be used. To ensure that a followup read tries
+        * to pull the buffer from disk again, we clear the XBF_DONE flag and
+        * mark the buffer stale. This ensures that anyone who has a current
+        * reference to the buffer will interpret its contents correctly and
+        * future cache lookups will also treat it as an empty, uninitialised
+        * buffer.
+        */
+       if (error) {
+               if (!XFS_FORCED_SHUTDOWN(target->bt_mount))
+                       xfs_buf_ioerror_alert(bp, fa);
 
-       if (flags & XBF_ASYNC) {
-               /*
-                * Read ahead call which is already satisfied,
-                * drop the buffer
-                */
+               bp->b_flags &= ~XBF_DONE;
+               xfs_buf_stale(bp);
                xfs_buf_relse(bp);
-               return NULL;
+
+               /* bad CRC means corrupted metadata */
+               if (error == -EFSBADCRC)
+                       error = -EFSCORRUPTED;
+               return error;
        }
 
-       /* We do not want read in the flags */
-       bp->b_flags &= ~XBF_READ;
-       ASSERT(bp->b_ops != NULL || ops == NULL);
-       return bp;
+       *bpp = bp;
+       return 0;
 }
 
 /*
@@ -859,11 +880,14 @@ xfs_buf_readahead_map(
        int                     nmaps,
        const struct xfs_buf_ops *ops)
 {
+       struct xfs_buf          *bp;
+
        if (bdi_read_congested(target->bt_bdev->bd_bdi))
                return;
 
        xfs_buf_read_map(target, map, nmaps,
-                    XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
+                    XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
+                    __this_address);
 }
 
 /*
@@ -880,12 +904,13 @@ xfs_buf_read_uncached(
        const struct xfs_buf_ops *ops)
 {
        struct xfs_buf          *bp;
+       int                     error;
 
        *bpp = NULL;
 
-       bp = xfs_buf_get_uncached(target, numblks, flags);
-       if (!bp)
-               return -ENOMEM;
+       error = xfs_buf_get_uncached(target, numblks, flags, &bp);
+       if (error)
+               return error;
 
        /* set up the buffer for a read IO */
        ASSERT(bp->b_map_count == 1);
@@ -896,7 +921,7 @@ xfs_buf_read_uncached(
 
        xfs_buf_submit(bp);
        if (bp->b_error) {
-               int     error = bp->b_error;
+               error = bp->b_error;
                xfs_buf_relse(bp);
                return error;
        }
@@ -905,20 +930,23 @@ xfs_buf_read_uncached(
        return 0;
 }
 
-xfs_buf_t *
+int
 xfs_buf_get_uncached(
        struct xfs_buftarg      *target,
        size_t                  numblks,
-       int                     flags)
+       int                     flags,
+       struct xfs_buf          **bpp)
 {
        unsigned long           page_count;
        int                     error, i;
        struct xfs_buf          *bp;
        DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
+       *bpp = NULL;
+
        /* flags might contain irrelevant bits, pass only what we care about */
-       bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
-       if (unlikely(bp == NULL))
+       error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+       if (error)
                goto fail;
 
        page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
@@ -928,8 +956,10 @@ xfs_buf_get_uncached(
 
        for (i = 0; i < page_count; i++) {
                bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
-               if (!bp->b_pages[i])
+               if (!bp->b_pages[i]) {
+                       error = -ENOMEM;
                        goto fail_free_mem;
+               }
        }
        bp->b_flags |= _XBF_PAGES;
 
@@ -941,7 +971,8 @@ xfs_buf_get_uncached(
        }
 
        trace_xfs_buf_get_uncached(bp, _RET_IP_);
-       return bp;
+       *bpp = bp;
+       return 0;
 
  fail_free_mem:
        while (--i >= 0)
@@ -951,7 +982,7 @@ xfs_buf_get_uncached(
        xfs_buf_free_maps(bp);
        kmem_cache_free(xfs_buf_zone, bp);
  fail:
-       return NULL;
+       return error;
 }
 
 /*
@@ -1205,10 +1236,10 @@ __xfs_buf_ioerror(
 void
 xfs_buf_ioerror_alert(
        struct xfs_buf          *bp,
-       const char              *func)
+       xfs_failaddr_t          func)
 {
        xfs_alert(bp->b_mount,
-"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
+"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
                        func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
                        -bp->b_error);
 }
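
A minimal caller-side sketch of the new convention (illustrative only; demo_get_buffer() is hypothetical and assumes the usual xfs_buf.h declarations): the buffer is now handed back through an out parameter and failure is a negative errno, so callers no longer test for NULL.

static int
demo_get_buffer(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks)
{
	struct xfs_buf		*bp;
	int			error;

	/* returns 0 with bp set, or a negative errno such as -ENOMEM */
	error = xfs_buf_get(target, blkno, numblks, &bp);
	if (error)
		return error;

	/* ... use bp ... */
	xfs_buf_relse(bp);
	return 0;
}
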
index 56e081d..d79a1fe 100644 (file)
@@ -192,37 +192,40 @@ struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
                           xfs_daddr_t blkno, size_t numblks,
                           xfs_buf_flags_t flags);
 
-struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
-                              struct xfs_buf_map *map, int nmaps,
-                              xfs_buf_flags_t flags);
-struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
-                              struct xfs_buf_map *map, int nmaps,
-                              xfs_buf_flags_t flags,
-                              const struct xfs_buf_ops *ops);
+int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+               int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
+int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+               int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
+               const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
 void xfs_buf_readahead_map(struct xfs_buftarg *target,
                               struct xfs_buf_map *map, int nmaps,
                               const struct xfs_buf_ops *ops);
 
-static inline struct xfs_buf *
+static inline int
 xfs_buf_get(
        struct xfs_buftarg      *target,
        xfs_daddr_t             blkno,
-       size_t                  numblks)
+       size_t                  numblks,
+       struct xfs_buf          **bpp)
 {
        DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-       return xfs_buf_get_map(target, &map, 1, 0);
+
+       return xfs_buf_get_map(target, &map, 1, 0, bpp);
 }
 
-static inline struct xfs_buf *
+static inline int
 xfs_buf_read(
        struct xfs_buftarg      *target,
        xfs_daddr_t             blkno,
        size_t                  numblks,
        xfs_buf_flags_t         flags,
+       struct xfs_buf          **bpp,
        const struct xfs_buf_ops *ops)
 {
        DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-       return xfs_buf_read_map(target, &map, 1, flags, ops);
+
+       return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
+                       __builtin_return_address(0));
 }
 
 static inline void
@@ -236,8 +239,8 @@ xfs_buf_readahead(
        return xfs_buf_readahead_map(target, &map, 1, ops);
 }
 
-struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
-                               int flags);
+int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
+               struct xfs_buf **bpp);
 int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
                          size_t numblks, int flags, struct xfs_buf **bpp,
                          const struct xfs_buf_ops *ops);
@@ -259,7 +262,7 @@ extern void xfs_buf_ioend(struct xfs_buf *bp);
 extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
                xfs_failaddr_t failaddr);
 #define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
-extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
+extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
 
 extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
 static inline int xfs_buf_submit(struct xfs_buf *bp)
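
The xfs_buf_read() wrapper above records __builtin_return_address(0) so that xfs_buf_ioerror_alert() can name the call site via the %pS printk format, which resolves a code address to symbol+offset. A small standalone sketch of that mechanism (not XFS code; the demo_* names are hypothetical):

#include <linux/printk.h>

static void
demo_report(void *fa)
{
	/* %pS prints the recorded address as symbol+offset */
	pr_err("metadata I/O error at %pS\n", fa);
}

static void
demo_caller(void)
{
	demo_report(__builtin_return_address(0));
}
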
index 5be8973..663810e 100644 (file)
@@ -1113,7 +1113,7 @@ xfs_buf_iodone_callback_error(
        if (bp->b_target != lasttarg ||
            time_after(jiffies, (lasttime + 5*HZ))) {
                lasttime = jiffies;
-               xfs_buf_ioerror_alert(bp, __func__);
+               xfs_buf_ioerror_alert(bp, __this_address);
        }
        lasttarg = bp->b_target;
 
index cae6136..0b8350e 100644 (file)
@@ -45,7 +45,7 @@ xfs_trim_extents(
        xfs_log_force(mp, XFS_LOG_SYNC);
 
        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
-       if (error || !agbp)
+       if (error)
                goto out_put_perag;
 
        cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
index 9cfd320..d223e1a 100644 (file)
@@ -320,10 +320,10 @@ xfs_dquot_disk_alloc(
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 
        /* now we can just get the buffer (there's nothing to read yet) */
-       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
-                       mp->m_quotainfo->qi_dqchunklen, 0);
-       if (!bp)
-               return -ENOMEM;
+       error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
+                       mp->m_quotainfo->qi_dqchunklen, 0, &bp);
+       if (error)
+               return error;
        bp->b_ops = &xfs_dquot_buf_ops;
 
        /*
index 5f12b5d..1a88025 100644 (file)
@@ -159,16 +159,15 @@ xfs_filestream_pick_ag(
 
                if (!pag->pagf_init) {
                        err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
-                       if (err && !trylock) {
+                       if (err) {
                                xfs_perag_put(pag);
-                               return err;
+                               if (err != -EAGAIN)
+                                       return err;
+                               /* Couldn't lock the AGF, skip this AG. */
+                               continue;
                        }
                }
 
-               /* Might fail sometimes during the 1st pass with trylock set. */
-               if (!pag->pagf_init)
-                       goto next_ag;
-
                /* Keep track of the AG with the most free blocks. */
                if (pag->pagf_freeblks > maxfree) {
                        maxfree = pag->pagf_freeblks;
index 1979a00..c5077e6 100644 (file)
@@ -2546,6 +2546,7 @@ xfs_ifree_cluster(
        struct xfs_perag        *pag;
        struct xfs_ino_geometry *igeo = M_IGEO(mp);
        xfs_ino_t               inum;
+       int                     error;
 
        inum = xic->first_ino;
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
@@ -2574,12 +2575,11 @@ xfs_ifree_cluster(
                 * complete before we get a lock on it, and hence we may fail
                 * to mark all the active inodes on the buffer stale.
                 */
-               bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
-                                       mp->m_bsize * igeo->blocks_per_cluster,
-                                       XBF_UNMAPPED);
-
-               if (!bp)
-                       return -ENOMEM;
+               error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
+                               mp->m_bsize * igeo->blocks_per_cluster,
+                               XBF_UNMAPPED, &bp);
+               if (error)
+                       return error;
 
                /*
                 * This buffer may not have been correctly initialised as we
index 0d683fb..25cfc85 100644 (file)
@@ -294,7 +294,7 @@ xlog_recover_iodone(
                 * this during recovery. One strike!
                 */
                if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
-                       xfs_buf_ioerror_alert(bp, __func__);
+                       xfs_buf_ioerror_alert(bp, __this_address);
                        xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
                }
        }
@@ -2745,15 +2745,10 @@ xlog_recover_buffer_pass2(
        if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
                buf_flags |= XBF_UNMAPPED;
 
-       bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
-                         buf_flags, NULL);
-       if (!bp)
-               return -ENOMEM;
-       error = bp->b_error;
-       if (error) {
-               xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
-               goto out_release;
-       }
+       error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+                         buf_flags, &bp, NULL);
+       if (error)
+               return error;
 
        /*
         * Recover the buffer only if we get an LSN from it and it's less than
@@ -2950,17 +2945,10 @@ xlog_recover_inode_pass2(
        }
        trace_xfs_log_recover_inode_recover(log, in_f);
 
-       bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
-                         &xfs_inode_buf_ops);
-       if (!bp) {
-               error = -ENOMEM;
+       error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
+                       0, &bp, &xfs_inode_buf_ops);
+       if (error)
                goto error;
-       }
-       error = bp->b_error;
-       if (error) {
-               xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
-               goto out_release;
-       }
        ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
        dip = xfs_buf_offset(bp, in_f->ilf_boffset);
 
@@ -5639,7 +5627,7 @@ xlog_do_recover(
        error = xfs_buf_submit(bp);
        if (error) {
                if (!XFS_FORCED_SHUTDOWN(mp)) {
-                       xfs_buf_ioerror_alert(bp, __func__);
+                       xfs_buf_ioerror_alert(bp, __this_address);
                        ASSERT(0);
                }
                xfs_buf_relse(bp);
index e723b26..b0ce04f 100644 (file)
@@ -143,8 +143,6 @@ xfs_reflink_find_shared(
        error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
        if (error)
                return error;
-       if (!agbp)
-               return -ENOMEM;
 
        cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
 
index d42b5a2..6209e7b 100644 (file)
@@ -826,12 +826,10 @@ xfs_growfs_rt_alloc(
                         * Get a buffer for the block.
                         */
                        d = XFS_FSB_TO_DADDR(mp, fsbno);
-                       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-                               mp->m_bsize, 0);
-                       if (bp == NULL) {
-                               error = -EIO;
+                       error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+                                       mp->m_bsize, 0, &bp);
+                       if (error)
                                goto out_trans_cancel;
-                       }
                        memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
                        xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
                        /*
index a25502b..d762d42 100644 (file)
@@ -53,20 +53,10 @@ xfs_readlink_bmap_ilocked(
                d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
                byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
 
-               bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
-                                 &xfs_symlink_buf_ops);
-               if (!bp)
-                       return -ENOMEM;
-               error = bp->b_error;
-               if (error) {
-                       xfs_buf_ioerror_alert(bp, __func__);
-                       xfs_buf_relse(bp);
-
-                       /* bad CRC means corrupted metadata */
-                       if (error == -EFSBADCRC)
-                               error = -EFSCORRUPTED;
-                       goto out;
-               }
+               error = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
+                               &bp, &xfs_symlink_buf_ops);
+               if (error)
+                       return error;
                byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
                if (pathlen < byte_cnt)
                        byte_cnt = pathlen;
@@ -290,12 +280,10 @@ xfs_symlink(
 
                        d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
                        byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
-                       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-                                              BTOBB(byte_cnt), 0);
-                       if (!bp) {
-                               error = -ENOMEM;
+                       error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+                                              BTOBB(byte_cnt), 0, &bp);
+                       if (error)
                                goto out_trans_cancel;
-                       }
                        bp->b_ops = &xfs_symlink_buf_ops;
 
                        byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
@@ -433,13 +421,12 @@ xfs_inactive_symlink_rmt(
         * Invalidate the block(s). No validation is done.
         */
        for (i = 0; i < nmaps; i++) {
-               bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
-                       XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
-                       XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
-               if (!bp) {
-                       error = -ENOMEM;
+               error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+                               XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
+                               XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0,
+                               &bp);
+               if (error)
                        goto error_trans_cancel;
-               }
                xfs_trans_binval(tp, bp);
        }
        /*
index 64d7f17..752c7fe 100644 (file)
@@ -169,21 +169,21 @@ int               xfs_trans_alloc_empty(struct xfs_mount *mp,
                        struct xfs_trans **tpp);
 void           xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
 
-struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
-                                      struct xfs_buftarg *target,
-                                      struct xfs_buf_map *map, int nmaps,
-                                      uint flags);
+int xfs_trans_get_buf_map(struct xfs_trans *tp, struct xfs_buftarg *target,
+               struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags,
+               struct xfs_buf **bpp);
 
-static inline struct xfs_buf *
+static inline int
 xfs_trans_get_buf(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        xfs_daddr_t             blkno,
        int                     numblks,
-       uint                    flags)
+       uint                    flags,
+       struct xfs_buf          **bpp)
 {
        DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-       return xfs_trans_get_buf_map(tp, target, &map, 1, flags);
+       return xfs_trans_get_buf_map(tp, target, &map, 1, flags, bpp);
 }
 
 int            xfs_trans_read_buf_map(struct xfs_mount *mp,
index b5b3a78..08174ff 100644 (file)
@@ -112,19 +112,22 @@ xfs_trans_bjoin(
  * If the transaction pointer is NULL, make this just a normal
  * get_buf() call.
  */
-struct xfs_buf *
+int
 xfs_trans_get_buf_map(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
-       xfs_buf_flags_t         flags)
+       xfs_buf_flags_t         flags,
+       struct xfs_buf          **bpp)
 {
        xfs_buf_t               *bp;
        struct xfs_buf_log_item *bip;
+       int                     error;
 
+       *bpp = NULL;
        if (!tp)
-               return xfs_buf_get_map(target, map, nmaps, flags);
+               return xfs_buf_get_map(target, map, nmaps, flags, bpp);
 
        /*
         * If we find the buffer in the cache with this transaction
@@ -146,19 +149,20 @@ xfs_trans_get_buf_map(
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_get_buf_recur(bip);
-               return bp;
+               *bpp = bp;
+               return 0;
        }
 
-       bp = xfs_buf_get_map(target, map, nmaps, flags);
-       if (bp == NULL) {
-               return NULL;
-       }
+       error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+       if (error)
+               return error;
 
        ASSERT(!bp->b_error);
 
        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_get_buf(bp->b_log_item);
-       return bp;
+       *bpp = bp;
+       return 0;
 }
 
 /*
@@ -276,7 +280,7 @@ xfs_trans_read_buf_map(
                ASSERT(bp->b_ops != NULL);
                error = xfs_buf_reverify(bp, ops);
                if (error) {
-                       xfs_buf_ioerror_alert(bp, __func__);
+                       xfs_buf_ioerror_alert(bp, __return_address);
 
                        if (tp->t_flags & XFS_TRANS_DIRTY)
                                xfs_force_shutdown(tp->t_mountp,
@@ -298,36 +302,17 @@ xfs_trans_read_buf_map(
                return 0;
        }
 
-       bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
-       if (!bp) {
-               if (!(flags & XBF_TRYLOCK))
-                       return -ENOMEM;
-               return tp ? 0 : -EAGAIN;
-       }
-
-       /*
-        * If we've had a read error, then the contents of the buffer are
-        * invalid and should not be used. To ensure that a followup read tries
-        * to pull the buffer from disk again, we clear the XBF_DONE flag and
-        * mark the buffer stale. This ensures that anyone who has a current
-        * reference to the buffer will interpret it's contents correctly and
-        * future cache lookups will also treat it as an empty, uninitialised
-        * buffer.
-        */
-       if (bp->b_error) {
-               error = bp->b_error;
-               if (!XFS_FORCED_SHUTDOWN(mp))
-                       xfs_buf_ioerror_alert(bp, __func__);
-               bp->b_flags &= ~XBF_DONE;
-               xfs_buf_stale(bp);
-
+       error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
+                       __return_address);
+       switch (error) {
+       case 0:
+               break;
+       default:
                if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
                        xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
-               xfs_buf_relse(bp);
-
-               /* bad CRC means corrupted metadata */
-               if (error == -EFSBADCRC)
-                       error = -EFSCORRUPTED;
+               /* fall through */
+       case -ENOMEM:
+       case -EAGAIN:
                return error;
        }
 
index ddfee1b..cd17d50 100644 (file)
@@ -4,5 +4,6 @@
 # (This file is not included when SRCARCH=um since UML borrows several
 # asm headers from the host architecture.)
 
+mandatory-y += dma-contiguous.h
 mandatory-y += msi.h
 mandatory-y += simd.h
index c2de013..35e4a53 100644 (file)
@@ -74,7 +74,7 @@ do {                                                                  \
 
 #define raw_cpu_generic_add_return(pcp, val)                           \
 ({                                                                     \
-       typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));                       \
+       typeof(pcp) *__p = raw_cpu_ptr(&(pcp));                         \
                                                                        \
        *__p += val;                                                    \
        *__p;                                                           \
@@ -82,7 +82,7 @@ do {                                                                  \
 
 #define raw_cpu_generic_xchg(pcp, nval)                                        \
 ({                                                                     \
-       typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));                       \
+       typeof(pcp) *__p = raw_cpu_ptr(&(pcp));                         \
        typeof(pcp) __ret;                                              \
        __ret = *__p;                                                   \
        *__p = nval;                                                    \
@@ -91,7 +91,7 @@ do {                                                                  \
 
 #define raw_cpu_generic_cmpxchg(pcp, oval, nval)                       \
 ({                                                                     \
-       typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));                       \
+       typeof(pcp) *__p = raw_cpu_ptr(&(pcp));                         \
        typeof(pcp) __ret;                                              \
        __ret = *__p;                                                   \
        if (__ret == (oval))                                            \
@@ -101,8 +101,8 @@ do {                                                                        \
 
 #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 ({                                                                     \
-       typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1));                    \
-       typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2));                    \
+       typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1));                      \
+       typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2));                      \
        int __ret = 0;                                                  \
        if (*__p1 == (oval1) && *__p2  == (oval2)) {                    \
                *__p1 = nval1;                                          \
index 798ea36..e2e2bef 100644 (file)
@@ -1238,4 +1238,24 @@ static inline bool arch_has_pfn_modify_check(void)
 #define mm_pmd_folded(mm)      __is_defined(__PAGETABLE_PMD_FOLDED)
 #endif
 
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() by the fact that they are always available (if
+ * the architecture supports large pages at the appropriate level) even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x)    0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x)    0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x)    0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x)    0
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
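
A minimal sketch of how a software page-table walker can use the p?d_leaf() helpers above to stop descending once an entry maps memory directly (illustrative; demo_is_leaf() is hypothetical and assumes <linux/mm.h>). Because the generic fall-backs evaluate to 0, the same code compiles on architectures without large pages at a given level and simply never sees a leaf there.

static bool
demo_is_leaf(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd;

	if (pud_leaf(*pud))	/* e.g. a 1G mapping ends at the PUD */
		return true;

	pmd = pmd_offset(pud, addr);
	return pmd_leaf(*pmd);	/* e.g. a 2M mapping ends at the PMD */
}
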
index 2b10036..f391f6b 100644 (file)
  *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
  *    there's large holes between the VMAs.
  *
+ *  - tlb_remove_table()
+ *
+ *    tlb_remove_table() is the basic primitive to free page-table directories
+ *    (__p*_free_tlb()).  In its most primitive form it is an alias for
+ *    tlb_remove_page() below, for when page directories are pages and have no
+ *    additional constraints.
+ *
+ *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
+ *
  *  - tlb_remove_page() / __tlb_remove_page()
  *  - tlb_remove_page_size() / __tlb_remove_page_size()
  *
  *
  * Additionally there are a few opt-in features:
  *
- *  HAVE_MMU_GATHER_PAGE_SIZE
+ *  MMU_GATHER_PAGE_SIZE
  *
  *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
  *  changes the size and provides mmu_gather::page_size to tlb_flush().
  *
- *  HAVE_RCU_TABLE_FREE
+ *  This might be useful if your architecture has size specific TLB
+ *  invalidation instructions.
+ *
+ *  MMU_GATHER_TABLE_FREE
  *
  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
- *  for page directores (__p*_free_tlb()). This provides separate freeing of
- *  the page-table pages themselves in a semi-RCU fashion (see comment below).
- *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
- *  and therefore doesn't naturally serialize with software page-table walkers.
+ *  for page directories (__p*_free_tlb()).
+ *
+ *  Useful if your architecture has non-page page directories.
  *
  *  When used, an architecture is expected to provide __tlb_remove_table()
  *  which does the actual freeing of these pages.
  *
- *  HAVE_RCU_TABLE_NO_INVALIDATE
+ *  MMU_GATHER_RCU_TABLE_FREE
  *
- *  This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
- *  freeing the page-table pages. This can be avoided if you use
- *  HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
- *  page-tables natively.
+ *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
+ *  comment below).
+ *
+ *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ *  and therefore doesn't naturally serialize with software page-table walkers.
  *
  *  MMU_GATHER_NO_RANGE
  *
  *  Use this if your architecture lacks an efficient flush_tlb_range().
- */
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-/*
- * Semi RCU freeing of the page directories.
- *
- * This is needed by some architectures to implement software pagetable walkers.
  *
- * gup_fast() and other software pagetable walkers do a lockless page-table
- * walk and therefore needs some synchronization with the freeing of the page
- * directories. The chosen means to accomplish that is by disabling IRQs over
- * the walk.
+ *  MMU_GATHER_NO_GATHER
  *
- * Architectures that use IPIs to flush TLBs will then automagically DTRT,
- * since we unlink the page, flush TLBs, free the page. Since the disabling of
- * IRQs delays the completion of the TLB flush we can never observe an already
- * freed page.
- *
- * Architectures that do not have this (PPC) need to delay the freeing by some
- * other means, this is that means.
- *
- * What we do is batch the freed directory pages (tables) and RCU free them.
- * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
- * holds off grace periods.
- *
- * However, in order to batch these pages we need to allocate storage, this
- * allocation is deep inside the MM code and can thus easily fail on memory
- * pressure. To guarantee progress we fall back to single table freeing, see
- * the implementation of tlb_remove_table_one().
+ *  If the option is set the mmu_gather will not track individual pages for
+ *  delayed page free anymore. A platform that enables the option needs to
+ *  provide its own implementation of the __tlb_remove_page_size() function to
+ *  free pages.
  *
+ *  This is useful if your architecture already flushes TLB entries in the
+ *  various ptep_get_and_clear() functions.
  */
+
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
+
 struct mmu_table_batch {
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
        struct rcu_head         rcu;
+#endif
        unsigned int            nr;
        void                    *tables[0];
 };
@@ -189,9 +186,35 @@ struct mmu_table_batch {
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+/*
+ * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
+ * page directories and we can use the normal page batching to free them.
+ */
+#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+/*
+ * This allows an architecture that does not use the linux page-tables for
+ * hardware to skip the TLBI when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
 #endif
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
+#endif
+
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -227,7 +250,7 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 struct mmu_gather {
        struct mm_struct        *mm;
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
        struct mmu_table_batch  *batch;
 #endif
 
@@ -266,22 +289,18 @@ struct mmu_gather {
 
        unsigned int            batch_count;
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page             *__pages[MMU_GATHER_BUNDLE];
 
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        unsigned int page_size;
 #endif
 #endif
 };
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb,
-       struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-                        unsigned long start, unsigned long end, bool force);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address,
@@ -394,7 +413,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-       if (!tlb->end)
+       /*
+        * Anything calling __tlb_adjust_range() also sets at least one of
+        * these bits.
+        */
+       if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
+             tlb->cleared_puds || tlb->cleared_p4ds))
                return;
 
        tlb_flush(tlb);
@@ -426,7 +450,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void tlb_change_page_size(struct mmu_gather *tlb,
                                                     unsigned int page_size)
 {
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        if (tlb->page_size && tlb->page_size != page_size) {
                if (!tlb->fullmm && !tlb->need_flush_all)
                        tlb_flush_mmu(tlb);
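
For the renamed MMU_GATHER_* options above, a hedged sketch of what an architecture's <asm/tlb.h> might supply when it selects MMU_GATHER_RCU_TABLE_FREE; the helper body mirrors what some architectures use, but the exact implementation is per-arch and not taken from this patch.

/* arch/<hypothetical>/include/asm/tlb.h */

static inline void __tlb_remove_table(void *table)
{
	/* runs after the semi-RCU grace period; free the directory page */
	free_page_and_swap_cache((struct page *)table);
}

/*
 * Only valid together with MMU_GATHER_RCU_TABLE_FREE (the #error above
 * enforces this): hardware that does not walk the Linux page tables may
 * skip the TLB invalidate before the free.
 */
#define tlb_needs_table_invalidate()	(false)
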
diff --git a/include/dt-bindings/clk/ti-dra7-atl.h b/include/dt-bindings/clk/ti-dra7-atl.h
deleted file mode 100644 (file)
index 42dd416..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * This header provides constants for DRA7 ATL (Audio Tracking Logic)
- *
- * The constants defined in this header are used in dts files
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- *
- * Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H
-#define _DT_BINDINGS_CLK_DRA7_ATL_H
-
-#define DRA7_ATL_WS_MCASP1_FSR         0
-#define DRA7_ATL_WS_MCASP1_FSX         1
-#define DRA7_ATL_WS_MCASP2_FSR         2
-#define DRA7_ATL_WS_MCASP2_FSX         3
-#define DRA7_ATL_WS_MCASP3_FSX         4
-#define DRA7_ATL_WS_MCASP4_FSX         5
-#define DRA7_ATL_WS_MCASP5_FSX         6
-#define DRA7_ATL_WS_MCASP6_FSX         7
-#define DRA7_ATL_WS_MCASP7_FSX         8
-#define DRA7_ATL_WS_MCASP8_FSX         9
-#define DRA7_ATL_WS_MCASP8_AHCLKX      10
-#define DRA7_ATL_WS_XREF_CLK3          11
-#define DRA7_ATL_WS_XREF_CLK0          12
-#define DRA7_ATL_WS_XREF_CLK1          13
-#define DRA7_ATL_WS_XREF_CLK2          14
-#define DRA7_ATL_WS_OSC1_X1            15
-
-#endif
index 72f2e84..8cec5a1 100644 (file)
 #define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
 #define DRA7_RTCSS_CLKCTRL     DRA7_RTC_CLKCTRL_INDEX(0x44)
 
+/* vip clocks */
+#define DRA7_VIP1_CLKCTRL      DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_VIP2_CLKCTRL      DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_VIP3_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
+
+/* vpe clocks */
+#define DRA7_VPE_CLKCTRL_OFFSET        0x60
+#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
+#define DRA7_VPE_CLKCTRL       DRA7_VPE_CLKCTRL_INDEX(0x64)
+
 /* coreaon clocks */
 #define DRA7_SMARTREFLEX_MPU_CLKCTRL   DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_SMARTREFLEX_CORE_CLKCTRL  DRA7_CLKCTRL_INDEX(0x38)
@@ -78,6 +88,9 @@
 #define DRA7_DSS_CORE_CLKCTRL  DRA7_CLKCTRL_INDEX(0x20)
 #define DRA7_BB2D_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
 
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL       DRA7_CLKCTRL_INDEX(0x20)
+
 /* l3init clocks */
 #define DRA7_MMC1_CLKCTRL      DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_MMC2_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
 /* rtc clocks */
 #define DRA7_RTC_RTCSS_CLKCTRL DRA7_CLKCTRL_INDEX(0x44)
 
+/* vip clocks */
+#define DRA7_CAM_VIP1_CLKCTRL  DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_CAM_VIP2_CLKCTRL  DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_CAM_VIP3_CLKCTRL  DRA7_CLKCTRL_INDEX(0x30)
+
+/* vpe clocks */
+#define DRA7_VPE_CLKCTRL_OFFSET        0x60
+#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
+#define DRA7_VPE_VPE_CLKCTRL   DRA7_VPE_CLKCTRL_INDEX(0x64)
+
 /* coreaon clocks */
 #define DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL   DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL  DRA7_CLKCTRL_INDEX(0x38)
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
new file mode 100644 (file)
index 0000000..2fab631
--- /dev/null
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MP_H
+#define __DT_BINDINGS_CLOCK_IMX8MP_H
+
+#define IMX8MP_CLK_DUMMY                       0
+#define IMX8MP_CLK_32K                         1
+#define IMX8MP_CLK_24M                         2
+#define IMX8MP_OSC_HDMI_CLK                    3
+#define IMX8MP_CLK_EXT1                                4
+#define IMX8MP_CLK_EXT2                                5
+#define IMX8MP_CLK_EXT3                                6
+#define IMX8MP_CLK_EXT4                                7
+#define IMX8MP_AUDIO_PLL1_REF_SEL              8
+#define IMX8MP_AUDIO_PLL2_REF_SEL              9
+#define IMX8MP_VIDEO_PLL1_REF_SEL              10
+#define IMX8MP_DRAM_PLL_REF_SEL                        11
+#define IMX8MP_GPU_PLL_REF_SEL                 12
+#define IMX8MP_VPU_PLL_REF_SEL                 13
+#define IMX8MP_ARM_PLL_REF_SEL                 14
+#define IMX8MP_SYS_PLL1_REF_SEL                        15
+#define IMX8MP_SYS_PLL2_REF_SEL                        16
+#define IMX8MP_SYS_PLL3_REF_SEL                        17
+#define IMX8MP_AUDIO_PLL1                      18
+#define IMX8MP_AUDIO_PLL2                      19
+#define IMX8MP_VIDEO_PLL1                      20
+#define IMX8MP_DRAM_PLL                                21
+#define IMX8MP_GPU_PLL                         22
+#define IMX8MP_VPU_PLL                         23
+#define IMX8MP_ARM_PLL                         24
+#define IMX8MP_SYS_PLL1                                25
+#define IMX8MP_SYS_PLL2                                26
+#define IMX8MP_SYS_PLL3                                27
+#define IMX8MP_AUDIO_PLL1_BYPASS               28
+#define IMX8MP_AUDIO_PLL2_BYPASS               29
+#define IMX8MP_VIDEO_PLL1_BYPASS               30
+#define IMX8MP_DRAM_PLL_BYPASS                 31
+#define IMX8MP_GPU_PLL_BYPASS                  32
+#define IMX8MP_VPU_PLL_BYPASS                  33
+#define IMX8MP_ARM_PLL_BYPASS                  34
+#define IMX8MP_SYS_PLL1_BYPASS                 35
+#define IMX8MP_SYS_PLL2_BYPASS                 36
+#define IMX8MP_SYS_PLL3_BYPASS                 37
+#define IMX8MP_AUDIO_PLL1_OUT                  38
+#define IMX8MP_AUDIO_PLL2_OUT                  39
+#define IMX8MP_VIDEO_PLL1_OUT                  40
+#define IMX8MP_DRAM_PLL_OUT                    41
+#define IMX8MP_GPU_PLL_OUT                     42
+#define IMX8MP_VPU_PLL_OUT                     43
+#define IMX8MP_ARM_PLL_OUT                     44
+#define IMX8MP_SYS_PLL1_OUT                    45
+#define IMX8MP_SYS_PLL2_OUT                    46
+#define IMX8MP_SYS_PLL3_OUT                    47
+#define IMX8MP_SYS_PLL1_40M                    48
+#define IMX8MP_SYS_PLL1_80M                    49
+#define IMX8MP_SYS_PLL1_100M                   50
+#define IMX8MP_SYS_PLL1_133M                   51
+#define IMX8MP_SYS_PLL1_160M                   52
+#define IMX8MP_SYS_PLL1_200M                   53
+#define IMX8MP_SYS_PLL1_266M                   54
+#define IMX8MP_SYS_PLL1_400M                   55
+#define IMX8MP_SYS_PLL1_800M                   56
+#define IMX8MP_SYS_PLL2_50M                    57
+#define IMX8MP_SYS_PLL2_100M                   58
+#define IMX8MP_SYS_PLL2_125M                   59
+#define IMX8MP_SYS_PLL2_166M                   60
+#define IMX8MP_SYS_PLL2_200M                   61
+#define IMX8MP_SYS_PLL2_250M                   62
+#define IMX8MP_SYS_PLL2_333M                   63
+#define IMX8MP_SYS_PLL2_500M                   64
+#define IMX8MP_SYS_PLL2_1000M                  65
+#define IMX8MP_CLK_A53_SRC                     66
+#define IMX8MP_CLK_M7_SRC                      67
+#define IMX8MP_CLK_ML_SRC                      68
+#define IMX8MP_CLK_GPU3D_CORE_SRC              69
+#define IMX8MP_CLK_GPU3D_SHADER_SRC            70
+#define IMX8MP_CLK_GPU2D_SRC                   71
+#define IMX8MP_CLK_AUDIO_AXI_SRC               72
+#define IMX8MP_CLK_HSIO_AXI_SRC                        73
+#define IMX8MP_CLK_MEDIA_ISP_SRC               74
+#define IMX8MP_CLK_A53_CG                      75
+#define IMX8MP_CLK_M4_CG                       76
+#define IMX8MP_CLK_ML_CG                       77
+#define IMX8MP_CLK_GPU3D_CORE_CG               78
+#define IMX8MP_CLK_GPU3D_SHADER_CG             79
+#define IMX8MP_CLK_GPU2D_CG                    80
+#define IMX8MP_CLK_AUDIO_AXI_CG                        81
+#define IMX8MP_CLK_HSIO_AXI_CG                 82
+#define IMX8MP_CLK_MEDIA_ISP_CG                        83
+#define IMX8MP_CLK_A53_DIV                     84
+#define IMX8MP_CLK_M7_DIV                      85
+#define IMX8MP_CLK_ML_DIV                      86
+#define IMX8MP_CLK_GPU3D_CORE_DIV              87
+#define IMX8MP_CLK_GPU3D_SHADER_DIV            88
+#define IMX8MP_CLK_GPU2D_DIV                   89
+#define IMX8MP_CLK_AUDIO_AXI_DIV               90
+#define IMX8MP_CLK_HSIO_AXI_DIV                        91
+#define IMX8MP_CLK_MEDIA_ISP_DIV               92
+#define IMX8MP_CLK_MAIN_AXI                    93
+#define IMX8MP_CLK_ENET_AXI                    94
+#define IMX8MP_CLK_NAND_USDHC_BUS              95
+#define IMX8MP_CLK_VPU_BUS                     96
+#define IMX8MP_CLK_MEDIA_AXI                   97
+#define IMX8MP_CLK_MEDIA_APB                   98
+#define IMX8MP_CLK_HDMI_APB                    99
+#define IMX8MP_CLK_HDMI_AXI                    100
+#define IMX8MP_CLK_GPU_AXI                     101
+#define IMX8MP_CLK_GPU_AHB                     102
+#define IMX8MP_CLK_NOC                         103
+#define IMX8MP_CLK_NOC_IO                      104
+#define IMX8MP_CLK_ML_AXI                      105
+#define IMX8MP_CLK_ML_AHB                      106
+#define IMX8MP_CLK_AHB                         107
+#define IMX8MP_CLK_AUDIO_AHB                   108
+#define IMX8MP_CLK_MIPI_DSI_ESC_RX             109
+#define IMX8MP_CLK_IPG_ROOT                    110
+#define IMX8MP_CLK_IPG_AUDIO_ROOT              111
+#define IMX8MP_CLK_DRAM_ALT                    112
+#define IMX8MP_CLK_DRAM_APB                    113
+#define IMX8MP_CLK_VPU_G1                      114
+#define IMX8MP_CLK_VPU_G2                      115
+#define IMX8MP_CLK_CAN1                                116
+#define IMX8MP_CLK_CAN2                                117
+#define IMX8MP_CLK_MEMREPAIR                   118
+#define IMX8MP_CLK_PCIE_PHY                    119
+#define IMX8MP_CLK_PCIE_AUX                    120
+#define IMX8MP_CLK_I2C5                                121
+#define IMX8MP_CLK_I2C6                                122
+#define IMX8MP_CLK_SAI1                                123
+#define IMX8MP_CLK_SAI2                                124
+#define IMX8MP_CLK_SAI3                                125
+#define IMX8MP_CLK_SAI4                                126
+#define IMX8MP_CLK_SAI5                                127
+#define IMX8MP_CLK_SAI6                                128
+#define IMX8MP_CLK_ENET_QOS                    129
+#define IMX8MP_CLK_ENET_QOS_TIMER              130
+#define IMX8MP_CLK_ENET_REF                    131
+#define IMX8MP_CLK_ENET_TIMER                  132
+#define IMX8MP_CLK_ENET_PHY_REF                        133
+#define IMX8MP_CLK_NAND                                134
+#define IMX8MP_CLK_QSPI                                135
+#define IMX8MP_CLK_USDHC1                      136
+#define IMX8MP_CLK_USDHC2                      137
+#define IMX8MP_CLK_I2C1                                138
+#define IMX8MP_CLK_I2C2                                139
+#define IMX8MP_CLK_I2C3                                140
+#define IMX8MP_CLK_I2C4                                141
+#define IMX8MP_CLK_UART1                       142
+#define IMX8MP_CLK_UART2                       143
+#define IMX8MP_CLK_UART3                       144
+#define IMX8MP_CLK_UART4                       145
+#define IMX8MP_CLK_USB_CORE_REF                        146
+#define IMX8MP_CLK_USB_PHY_REF                 147
+#define IMX8MP_CLK_GIC                         148
+#define IMX8MP_CLK_ECSPI1                      149
+#define IMX8MP_CLK_ECSPI2                      150
+#define IMX8MP_CLK_PWM1                                151
+#define IMX8MP_CLK_PWM2                                152
+#define IMX8MP_CLK_PWM3                                153
+#define IMX8MP_CLK_PWM4                                154
+#define IMX8MP_CLK_GPT1                                155
+#define IMX8MP_CLK_GPT2                                156
+#define IMX8MP_CLK_GPT3                                157
+#define IMX8MP_CLK_GPT4                                158
+#define IMX8MP_CLK_GPT5                                159
+#define IMX8MP_CLK_GPT6                                160
+#define IMX8MP_CLK_TRACE                       161
+#define IMX8MP_CLK_WDOG                                162
+#define IMX8MP_CLK_WRCLK                       163
+#define IMX8MP_CLK_IPP_DO_CLKO1                        164
+#define IMX8MP_CLK_IPP_DO_CLKO2                        165
+#define IMX8MP_CLK_HDMI_FDCC_TST               166
+#define IMX8MP_CLK_HDMI_27M                    167
+#define IMX8MP_CLK_HDMI_REF_266M               168
+#define IMX8MP_CLK_USDHC3                      169
+#define IMX8MP_CLK_MEDIA_CAM1_PIX              170
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF         171
+#define IMX8MP_CLK_MEDIA_DISP1_PIX             172
+#define IMX8MP_CLK_MEDIA_CAM2_PIX              173
+#define IMX8MP_CLK_MEDIA_MIPI_PHY2_REF         174
+#define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC         175
+#define IMX8MP_CLK_PCIE2_CTRL                  176
+#define IMX8MP_CLK_PCIE2_PHY                   177
+#define IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE                178
+#define IMX8MP_CLK_ECSPI3                      179
+#define IMX8MP_CLK_PDM                         180
+#define IMX8MP_CLK_VPU_VC8000E                 181
+#define IMX8MP_CLK_SAI7                                182
+#define IMX8MP_CLK_GPC_ROOT                    183
+#define IMX8MP_CLK_ANAMIX_ROOT                 184
+#define IMX8MP_CLK_CPU_ROOT                    185
+#define IMX8MP_CLK_CSU_ROOT                    186
+#define IMX8MP_CLK_DEBUG_ROOT                  187
+#define IMX8MP_CLK_DRAM1_ROOT                  188
+#define IMX8MP_CLK_ECSPI1_ROOT                 189
+#define IMX8MP_CLK_ECSPI2_ROOT                 190
+#define IMX8MP_CLK_ECSPI3_ROOT                 191
+#define IMX8MP_CLK_ENET1_ROOT                  192
+#define IMX8MP_CLK_GPIO1_ROOT                  193
+#define IMX8MP_CLK_GPIO2_ROOT                  194
+#define IMX8MP_CLK_GPIO3_ROOT                  195
+#define IMX8MP_CLK_GPIO4_ROOT                  196
+#define IMX8MP_CLK_GPIO5_ROOT                  197
+#define IMX8MP_CLK_GPT1_ROOT                   198
+#define IMX8MP_CLK_GPT2_ROOT                   199
+#define IMX8MP_CLK_GPT3_ROOT                   200
+#define IMX8MP_CLK_GPT4_ROOT                   201
+#define IMX8MP_CLK_GPT5_ROOT                   202
+#define IMX8MP_CLK_GPT6_ROOT                   203
+#define IMX8MP_CLK_HS_ROOT                     204
+#define IMX8MP_CLK_I2C1_ROOT                   205
+#define IMX8MP_CLK_I2C2_ROOT                   206
+#define IMX8MP_CLK_I2C3_ROOT                   207
+#define IMX8MP_CLK_I2C4_ROOT                   208
+#define IMX8MP_CLK_IOMUX_ROOT                  209
+#define IMX8MP_CLK_IPMUX1_ROOT                 210
+#define IMX8MP_CLK_IPMUX2_ROOT                 211
+#define IMX8MP_CLK_IPMUX3_ROOT                 212
+#define IMX8MP_CLK_MU_ROOT                     213
+#define IMX8MP_CLK_OCOTP_ROOT                  214
+#define IMX8MP_CLK_OCRAM_ROOT                  215
+#define IMX8MP_CLK_OCRAM_S_ROOT                        216
+#define IMX8MP_CLK_PCIE_ROOT                   217
+#define IMX8MP_CLK_PERFMON1_ROOT               218
+#define IMX8MP_CLK_PERFMON2_ROOT               219
+#define IMX8MP_CLK_PWM1_ROOT                   220
+#define IMX8MP_CLK_PWM2_ROOT                   221
+#define IMX8MP_CLK_PWM3_ROOT                   222
+#define IMX8MP_CLK_PWM4_ROOT                   223
+#define IMX8MP_CLK_QOS_ROOT                    224
+#define IMX8MP_CLK_QOS_ENET_ROOT               225
+#define IMX8MP_CLK_QSPI_ROOT                   226
+#define IMX8MP_CLK_NAND_ROOT                   227
+#define IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK  228
+#define IMX8MP_CLK_RDC_ROOT                    229
+#define IMX8MP_CLK_ROM_ROOT                    230
+#define IMX8MP_CLK_I2C5_ROOT                   231
+#define IMX8MP_CLK_I2C6_ROOT                   232
+#define IMX8MP_CLK_CAN1_ROOT                   233
+#define IMX8MP_CLK_CAN2_ROOT                   234
+#define IMX8MP_CLK_SCTR_ROOT                   235
+#define IMX8MP_CLK_SDMA1_ROOT                  236
+#define IMX8MP_CLK_ENET_QOS_ROOT               237
+#define IMX8MP_CLK_SEC_DEBUG_ROOT              238
+#define IMX8MP_CLK_SEMA1_ROOT                  239
+#define IMX8MP_CLK_SEMA2_ROOT                  240
+#define IMX8MP_CLK_IRQ_STEER_ROOT              241
+#define IMX8MP_CLK_SIM_ENET_ROOT               242
+#define IMX8MP_CLK_SIM_M_ROOT                  243
+#define IMX8MP_CLK_SIM_MAIN_ROOT               244
+#define IMX8MP_CLK_SIM_S_ROOT                  245
+#define IMX8MP_CLK_SIM_WAKEUP_ROOT             246
+#define IMX8MP_CLK_GPU2D_ROOT                  247
+#define IMX8MP_CLK_GPU3D_ROOT                  248
+#define IMX8MP_CLK_SNVS_ROOT                   249
+#define IMX8MP_CLK_TRACE_ROOT                  250
+#define IMX8MP_CLK_UART1_ROOT                  251
+#define IMX8MP_CLK_UART2_ROOT                  252
+#define IMX8MP_CLK_UART3_ROOT                  253
+#define IMX8MP_CLK_UART4_ROOT                  254
+#define IMX8MP_CLK_USB_ROOT                    255
+#define IMX8MP_CLK_USB_PHY_ROOT                        256
+#define IMX8MP_CLK_USDHC1_ROOT                 257
+#define IMX8MP_CLK_USDHC2_ROOT                 258
+#define IMX8MP_CLK_WDOG1_ROOT                  259
+#define IMX8MP_CLK_WDOG2_ROOT                  260
+#define IMX8MP_CLK_WDOG3_ROOT                  261
+#define IMX8MP_CLK_VPU_G1_ROOT                 262
+#define IMX8MP_CLK_GPU_ROOT                    263
+#define IMX8MP_CLK_NOC_WRAPPER_ROOT            264
+#define IMX8MP_CLK_VPU_VC8KE_ROOT              265
+#define IMX8MP_CLK_VPU_G2_ROOT                 266
+#define IMX8MP_CLK_NPU_ROOT                    267
+#define IMX8MP_CLK_HSIO_ROOT                   268
+#define IMX8MP_CLK_MEDIA_APB_ROOT              269
+#define IMX8MP_CLK_MEDIA_AXI_ROOT              270
+#define IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT         271
+#define IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT         272
+#define IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT                273
+#define IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT                274
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT    275
+#define IMX8MP_CLK_MEDIA_ISP_ROOT              276
+#define IMX8MP_CLK_USDHC3_ROOT                 277
+#define IMX8MP_CLK_HDMI_ROOT                   278
+#define IMX8MP_CLK_XTAL_ROOT                   279
+#define IMX8MP_CLK_PLL_ROOT                    280
+#define IMX8MP_CLK_TSENSOR_ROOT                        281
+#define IMX8MP_CLK_VPU_ROOT                    282
+#define IMX8MP_CLK_MRPR_ROOT                   283
+#define IMX8MP_CLK_AUDIO_ROOT                  284
+#define IMX8MP_CLK_DRAM_ALT_ROOT               285
+#define IMX8MP_CLK_DRAM_CORE                   286
+#define IMX8MP_CLK_ARM                         287
+
+#define IMX8MP_CLK_END                         288
+
+#endif
diff --git a/include/dt-bindings/clock/meson8-ddr-clkc.h b/include/dt-bindings/clock/meson8-ddr-clkc.h
new file mode 100644 (file)
index 0000000..a8e0fa2
--- /dev/null
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define DDR_CLKID_DDR_PLL_DCO                  0
+#define DDR_CLKID_DDR_PLL                      1
index ba67206..2b4fd9a 100644 (file)
@@ -16,6 +16,7 @@
 
 /* abe clocks */
 #define OMAP5_L4_ABE_CLKCTRL   OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_AESS_CLKCTRL     OMAP5_CLKCTRL_INDEX(0x28)
 #define OMAP5_MCPDM_CLKCTRL    OMAP5_CLKCTRL_INDEX(0x30)
 #define OMAP5_DMIC_CLKCTRL     OMAP5_CLKCTRL_INDEX(0x38)
 #define OMAP5_MCBSP1_CLKCTRL   OMAP5_CLKCTRL_INDEX(0x48)
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7180.h b/include/dt-bindings/clock/qcom,dispcc-sc7180.h
new file mode 100644 (file)
index 0000000..b9b5161
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7180_H
+
+#define DISP_CC_PLL0                           0
+#define DISP_CC_PLL0_OUT_EVEN                  1
+#define DISP_CC_MDSS_AHB_CLK                   2
+#define DISP_CC_MDSS_AHB_CLK_SRC               3
+#define DISP_CC_MDSS_BYTE0_CLK                 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC             5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC         6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK            7
+#define DISP_CC_MDSS_DP_AUX_CLK                        8
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC            9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK             10
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC         11
+#define DISP_CC_MDSS_DP_LINK_CLK               12
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC           13
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC       14
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK          15
+#define DISP_CC_MDSS_DP_PIXEL_CLK              16
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC          17
+#define DISP_CC_MDSS_ESC0_CLK                  18
+#define DISP_CC_MDSS_ESC0_CLK_SRC              19
+#define DISP_CC_MDSS_MDP_CLK                   20
+#define DISP_CC_MDSS_MDP_CLK_SRC               21
+#define DISP_CC_MDSS_MDP_LUT_CLK               22
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK          23
+#define DISP_CC_MDSS_PCLK0_CLK                 24
+#define DISP_CC_MDSS_PCLK0_CLK_SRC             25
+#define DISP_CC_MDSS_ROT_CLK                   26
+#define DISP_CC_MDSS_ROT_CLK_SRC               27
+#define DISP_CC_MDSS_RSCC_AHB_CLK              28
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK            29
+#define DISP_CC_MDSS_VSYNC_CLK                 30
+#define DISP_CC_MDSS_VSYNC_CLK_SRC             31
+#define DISP_CC_XO_CLK                         32
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC                              0
+
+#endif
index 11eed4b..4016fd1 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
 #define DISP_CC_PLL0                                           25
 #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC                         26
 #define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC                         27
+#define DISP_CC_MDSS_DP_AUX_CLK                                        28
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC                            29
+#define DISP_CC_MDSS_DP_CRYPTO_CLK                             30
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC                         31
+#define DISP_CC_MDSS_DP_LINK_CLK                               32
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC                           33
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK                          34
+#define DISP_CC_MDSS_DP_PIXEL1_CLK                             35
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC                         36
+#define DISP_CC_MDSS_DP_PIXEL_CLK                              37
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC                          38
 
 /* DISP_CC Reset */
 #define DISP_CC_MDSS_RSCC_BCR                                  0
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq6018.h b/include/dt-bindings/clock/qcom,gcc-ipq6018.h
new file mode 100644 (file)
index 0000000..6f4be3a
--- /dev/null
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_6018_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_6018_H
+
+#define GPLL0                                  0
+#define UBI32_PLL                              1
+#define GPLL6                                  2
+#define GPLL4                                  3
+#define PCNOC_BFDCD_CLK_SRC                    4
+#define GPLL2                                  5
+#define NSS_CRYPTO_PLL                         6
+#define NSS_PPE_CLK_SRC                                7
+#define GCC_XO_CLK_SRC                         8
+#define NSS_CE_CLK_SRC                         9
+#define GCC_SLEEP_CLK_SRC                      10
+#define APSS_AHB_CLK_SRC                       11
+#define NSS_PORT5_RX_CLK_SRC                   12
+#define NSS_PORT5_TX_CLK_SRC                   13
+#define PCIE0_AXI_CLK_SRC                      14
+#define USB0_MASTER_CLK_SRC                    15
+#define APSS_AHB_POSTDIV_CLK_SRC               16
+#define NSS_PORT1_RX_CLK_SRC                   17
+#define NSS_PORT1_TX_CLK_SRC                   18
+#define NSS_PORT2_RX_CLK_SRC                   19
+#define NSS_PORT2_TX_CLK_SRC                   20
+#define NSS_PORT3_RX_CLK_SRC                   21
+#define NSS_PORT3_TX_CLK_SRC                   22
+#define NSS_PORT4_RX_CLK_SRC                   23
+#define NSS_PORT4_TX_CLK_SRC                   24
+#define NSS_PORT5_RX_DIV_CLK_SRC               25
+#define NSS_PORT5_TX_DIV_CLK_SRC               26
+#define APSS_AXI_CLK_SRC                       27
+#define NSS_CRYPTO_CLK_SRC                     28
+#define NSS_PORT1_RX_DIV_CLK_SRC               29
+#define NSS_PORT1_TX_DIV_CLK_SRC               30
+#define NSS_PORT2_RX_DIV_CLK_SRC               31
+#define NSS_PORT2_TX_DIV_CLK_SRC               32
+#define NSS_PORT3_RX_DIV_CLK_SRC               33
+#define NSS_PORT3_TX_DIV_CLK_SRC               34
+#define NSS_PORT4_RX_DIV_CLK_SRC               35
+#define NSS_PORT4_TX_DIV_CLK_SRC               36
+#define NSS_UBI0_CLK_SRC                       37
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC            38
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC            39
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC            40
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC            41
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC            42
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC            43
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC            44
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC            45
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC            46
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC            47
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC            48
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC            49
+#define BLSP1_UART1_APPS_CLK_SRC               50
+#define BLSP1_UART2_APPS_CLK_SRC               51
+#define BLSP1_UART3_APPS_CLK_SRC               52
+#define BLSP1_UART4_APPS_CLK_SRC               53
+#define BLSP1_UART5_APPS_CLK_SRC               54
+#define BLSP1_UART6_APPS_CLK_SRC               55
+#define CRYPTO_CLK_SRC                         56
+#define NSS_UBI0_DIV_CLK_SRC                   57
+#define PCIE0_AUX_CLK_SRC                      58
+#define PCIE0_PIPE_CLK_SRC                     59
+#define SDCC1_APPS_CLK_SRC                     60
+#define USB0_AUX_CLK_SRC                       61
+#define USB0_MOCK_UTMI_CLK_SRC                 62
+#define USB0_PIPE_CLK_SRC                      63
+#define USB1_MOCK_UTMI_CLK_SRC                 64
+#define GCC_APSS_AHB_CLK                       65
+#define GCC_APSS_AXI_CLK                       66
+#define GCC_BLSP1_AHB_CLK                      67
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK            68
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK            69
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK            70
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK            71
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK            72
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK            73
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK            74
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK            75
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK            76
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK            77
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK            78
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK            79
+#define GCC_BLSP1_UART1_APPS_CLK               80
+#define GCC_BLSP1_UART2_APPS_CLK               81
+#define GCC_BLSP1_UART3_APPS_CLK               82
+#define GCC_BLSP1_UART4_APPS_CLK               83
+#define GCC_BLSP1_UART5_APPS_CLK               84
+#define GCC_BLSP1_UART6_APPS_CLK               85
+#define GCC_CRYPTO_AHB_CLK                     86
+#define GCC_CRYPTO_AXI_CLK                     87
+#define GCC_CRYPTO_CLK                         88
+#define GCC_XO_CLK                             89
+#define GCC_XO_DIV4_CLK                                90
+#define GCC_MDIO_AHB_CLK                       91
+#define GCC_CRYPTO_PPE_CLK                     92
+#define GCC_NSS_CE_APB_CLK                     93
+#define GCC_NSS_CE_AXI_CLK                     94
+#define GCC_NSS_CFG_CLK                                95
+#define GCC_NSS_CRYPTO_CLK                     96
+#define GCC_NSS_CSR_CLK                                97
+#define GCC_NSS_EDMA_CFG_CLK                   98
+#define GCC_NSS_EDMA_CLK                       99
+#define GCC_NSS_NOC_CLK                                100
+#define GCC_NSS_PORT1_RX_CLK                   101
+#define GCC_NSS_PORT1_TX_CLK                   102
+#define GCC_NSS_PORT2_RX_CLK                   103
+#define GCC_NSS_PORT2_TX_CLK                   104
+#define GCC_NSS_PORT3_RX_CLK                   105
+#define GCC_NSS_PORT3_TX_CLK                   106
+#define GCC_NSS_PORT4_RX_CLK                   107
+#define GCC_NSS_PORT4_TX_CLK                   108
+#define GCC_NSS_PORT5_RX_CLK                   109
+#define GCC_NSS_PORT5_TX_CLK                   110
+#define GCC_NSS_PPE_CFG_CLK                    111
+#define GCC_NSS_PPE_CLK                                112
+#define GCC_NSS_PPE_IPE_CLK                    113
+#define GCC_NSS_PTP_REF_CLK                    114
+#define GCC_NSSNOC_CE_APB_CLK                  115
+#define GCC_NSSNOC_CE_AXI_CLK                  116
+#define GCC_NSSNOC_CRYPTO_CLK                  117
+#define GCC_NSSNOC_PPE_CFG_CLK                 118
+#define GCC_NSSNOC_PPE_CLK                     119
+#define GCC_NSSNOC_QOSGEN_REF_CLK              120
+#define GCC_NSSNOC_TIMEOUT_REF_CLK             121
+#define GCC_NSSNOC_UBI0_AHB_CLK                        122
+#define GCC_PORT1_MAC_CLK                      123
+#define GCC_PORT2_MAC_CLK                      124
+#define GCC_PORT3_MAC_CLK                      125
+#define GCC_PORT4_MAC_CLK                      126
+#define GCC_PORT5_MAC_CLK                      127
+#define GCC_UBI0_AHB_CLK                       128
+#define GCC_UBI0_AXI_CLK                       129
+#define GCC_UBI0_CORE_CLK                      130
+#define GCC_PCIE0_AHB_CLK                      131
+#define GCC_PCIE0_AUX_CLK                      132
+#define GCC_PCIE0_AXI_M_CLK                    133
+#define GCC_PCIE0_AXI_S_CLK                    134
+#define GCC_PCIE0_PIPE_CLK                     135
+#define GCC_PRNG_AHB_CLK                       136
+#define GCC_QPIC_AHB_CLK                       137
+#define GCC_QPIC_CLK                           138
+#define GCC_SDCC1_AHB_CLK                      139
+#define GCC_SDCC1_APPS_CLK                     140
+#define GCC_UNIPHY0_AHB_CLK                    141
+#define GCC_UNIPHY0_PORT1_RX_CLK               142
+#define GCC_UNIPHY0_PORT1_TX_CLK               143
+#define GCC_UNIPHY0_PORT2_RX_CLK               144
+#define GCC_UNIPHY0_PORT2_TX_CLK               145
+#define GCC_UNIPHY0_PORT3_RX_CLK               146
+#define GCC_UNIPHY0_PORT3_TX_CLK               147
+#define GCC_UNIPHY0_PORT4_RX_CLK               148
+#define GCC_UNIPHY0_PORT4_TX_CLK               149
+#define GCC_UNIPHY0_PORT5_RX_CLK               150
+#define GCC_UNIPHY0_PORT5_TX_CLK               151
+#define GCC_UNIPHY0_SYS_CLK                    152
+#define GCC_UNIPHY1_AHB_CLK                    153
+#define GCC_UNIPHY1_PORT5_RX_CLK               154
+#define GCC_UNIPHY1_PORT5_TX_CLK               155
+#define GCC_UNIPHY1_SYS_CLK                    156
+#define GCC_USB0_AUX_CLK                       157
+#define GCC_USB0_MASTER_CLK                    158
+#define GCC_USB0_MOCK_UTMI_CLK                 159
+#define GCC_USB0_PHY_CFG_AHB_CLK               160
+#define GCC_USB0_PIPE_CLK                      161
+#define GCC_USB0_SLEEP_CLK                     162
+#define GCC_USB1_MASTER_CLK                    163
+#define GCC_USB1_MOCK_UTMI_CLK                 164
+#define GCC_USB1_PHY_CFG_AHB_CLK               165
+#define GCC_USB1_SLEEP_CLK                     166
+#define GP1_CLK_SRC                            167
+#define GP2_CLK_SRC                            168
+#define GP3_CLK_SRC                            169
+#define GCC_GP1_CLK                            170
+#define GCC_GP2_CLK                            171
+#define GCC_GP3_CLK                            172
+#define SYSTEM_NOC_BFDCD_CLK_SRC               173
+#define GCC_NSSNOC_SNOC_CLK                    174
+#define GCC_UBI0_NC_AXI_CLK                    175
+#define GCC_UBI1_NC_AXI_CLK                    176
+#define GPLL0_MAIN                             177
+#define UBI32_PLL_MAIN                         178
+#define GPLL6_MAIN                             179
+#define GPLL4_MAIN                             180
+#define GPLL2_MAIN                             181
+#define NSS_CRYPTO_PLL_MAIN                    182
+#define GCC_CMN_12GPLL_AHB_CLK                 183
+#define GCC_CMN_12GPLL_SYS_CLK                 184
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK          185
+#define GCC_SYS_NOC_USB0_AXI_CLK               186
+#define GCC_SYS_NOC_PCIE0_AXI_CLK              187
+#define QDSS_TSCTR_CLK_SRC                     188
+#define QDSS_AT_CLK_SRC                                189
+#define GCC_QDSS_AT_CLK                                190
+#define GCC_QDSS_DAP_CLK                       191
+#define ADSS_PWM_CLK_SRC                       192
+#define GCC_ADSS_PWM_CLK                       193
+#define SDCC1_ICE_CORE_CLK_SRC                 194
+#define GCC_SDCC1_ICE_CORE_CLK                 195
+#define GCC_DCC_CLK                            196
+#define PCIE0_RCHNG_CLK_SRC                    197
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK             198
+#define PCIE0_RCHNG_CLK                                199
+#define UBI32_MEM_NOC_BFDCD_CLK_SRC            200
+#define WCSS_AHB_CLK_SRC                       201
+#define Q6_AXI_CLK_SRC                         202
+#define GCC_Q6SS_PCLKDBG_CLK                   203
+#define GCC_Q6_TSCTR_1TO2_CLK                  204
+#define GCC_WCSS_CORE_TBU_CLK                  205
+#define GCC_WCSS_AXI_M_CLK                     206
+#define GCC_SYS_NOC_WCSS_AHB_CLK               207
+#define GCC_Q6_AXIM_CLK                                208
+#define GCC_Q6SS_ATBM_CLK                      209
+#define GCC_WCSS_Q6_TBU_CLK                    210
+#define GCC_Q6_AXIM2_CLK                       211
+#define GCC_Q6_AHB_CLK                         212
+#define GCC_Q6_AHB_S_CLK                       213
+#define GCC_WCSS_DBG_IFC_APB_CLK               214
+#define GCC_WCSS_DBG_IFC_ATB_CLK               215
+#define GCC_WCSS_DBG_IFC_NTS_CLK               216
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK            217
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK           218
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK           219
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK           220
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK                221
+#define GCC_WCSS_ECAHB_CLK                     222
+#define GCC_WCSS_ACMT_CLK                      223
+#define GCC_WCSS_AHB_S_CLK                     224
+#define GCC_RBCPR_WCSS_CLK                     225
+#define RBCPR_WCSS_CLK_SRC                     226
+#define GCC_RBCPR_WCSS_AHB_CLK                 227
+#define GCC_LPASS_CORE_AXIM_CLK                        228
+#define GCC_LPASS_SNOC_CFG_CLK                 229
+#define GCC_LPASS_Q6_AXIM_CLK                  230
+#define GCC_LPASS_Q6_ATBM_AT_CLK               231
+#define GCC_LPASS_Q6_PCLKDBG_CLK               232
+#define GCC_LPASS_Q6SS_TSCTR_1TO2_CLK          233
+#define GCC_LPASS_Q6SS_TRIG_CLK                        234
+#define GCC_LPASS_TBU_CLK                      235
+#define LPASS_CORE_AXIM_CLK_SRC                        236
+#define LPASS_SNOC_CFG_CLK_SRC                 237
+#define LPASS_Q6_AXIM_CLK_SRC                  238
+#define GCC_PCNOC_LPASS_CLK                    239
+#define GCC_UBI0_UTCM_CLK                      240
+#define SNOC_NSSNOC_BFDCD_CLK_SRC              241
+#define GCC_SNOC_NSSNOC_CLK                    242
+#define GCC_MEM_NOC_Q6_AXI_CLK                 243
+#define GCC_MEM_NOC_UBI32_CLK                  244
+#define GCC_MEM_NOC_LPASS_CLK                  245
+#define GCC_SNOC_LPASS_CFG_CLK                 246
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK           247
+#define GCC_QDSS_STM_CLK                       248
+#define GCC_QDSS_TRACECLKIN_CLK                        249
+#define QDSS_STM_CLK_SRC                       250
+#define QDSS_TRACECLKIN_CLK_SRC                        251
+#define GCC_NSSNOC_ATB_CLK                     252
+#endif
index de1d8a1..63e02dc 100644 (file)
 #define GCC_MSS_GPLL0_DIV_CLK_SRC                              173
 #define GCC_MSS_SNOC_AXI_CLK                                   174
 #define GCC_MSS_MNOC_BIMC_AXI_CLK                              175
+#define GCC_BIMC_GFX_CLK                                       176
 
 #define PCIE_0_GDSC                                            0
 #define UFS_GDSC                                               1
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7180.h b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
new file mode 100644 (file)
index 0000000..0e4643b
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7180_H
+
+#define GPU_CC_PLL1                    0
+#define GPU_CC_AHB_CLK                 1
+#define GPU_CC_CRC_AHB_CLK             2
+#define GPU_CC_CX_GMU_CLK              3
+#define GPU_CC_CX_SNOC_DVM_CLK         4
+#define GPU_CC_CXO_AON_CLK             5
+#define GPU_CC_CXO_CLK                 6
+#define GPU_CC_GMU_CLK_SRC             7
+
+/* CAM_CC GDSCRs */
+#define CX_GDSC                                0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8998.h b/include/dt-bindings/clock/qcom,mmcc-msm8998.h
new file mode 100644 (file)
index 0000000..ecbafdb
--- /dev/null
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8998_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_8998_H
+
+#define MMPLL0                                         0
+#define MMPLL0_OUT_EVEN                                        1
+#define MMPLL1                                         2
+#define MMPLL1_OUT_EVEN                                        3
+#define MMPLL3                                         4
+#define MMPLL3_OUT_EVEN                                        5
+#define MMPLL4                                         6
+#define MMPLL4_OUT_EVEN                                        7
+#define MMPLL5                                         8
+#define MMPLL5_OUT_EVEN                                        9
+#define MMPLL6                                         10
+#define MMPLL6_OUT_EVEN                                        11
+#define MMPLL7                                         12
+#define MMPLL7_OUT_EVEN                                        13
+#define MMPLL10                                                14
+#define MMPLL10_OUT_EVEN                               15
+#define BYTE0_CLK_SRC                                  16
+#define BYTE1_CLK_SRC                                  17
+#define CCI_CLK_SRC                                    18
+#define CPP_CLK_SRC                                    19
+#define CSI0_CLK_SRC                                   20
+#define CSI1_CLK_SRC                                   21
+#define CSI2_CLK_SRC                                   22
+#define CSI3_CLK_SRC                                   23
+#define CSIPHY_CLK_SRC                                 24
+#define CSI0PHYTIMER_CLK_SRC                           25
+#define CSI1PHYTIMER_CLK_SRC                           26
+#define CSI2PHYTIMER_CLK_SRC                           27
+#define DP_AUX_CLK_SRC                                 28
+#define DP_CRYPTO_CLK_SRC                              29
+#define DP_LINK_CLK_SRC                                        30
+#define DP_PIXEL_CLK_SRC                               31
+#define ESC0_CLK_SRC                                   32
+#define ESC1_CLK_SRC                                   33
+#define EXTPCLK_CLK_SRC                                        34
+#define FD_CORE_CLK_SRC                                        35
+#define HDMI_CLK_SRC                                   36
+#define JPEG0_CLK_SRC                                  37
+#define MAXI_CLK_SRC                                   38
+#define MCLK0_CLK_SRC                                  39
+#define MCLK1_CLK_SRC                                  40
+#define MCLK2_CLK_SRC                                  41
+#define MCLK3_CLK_SRC                                  42
+#define MDP_CLK_SRC                                    43
+#define VSYNC_CLK_SRC                                  44
+#define AHB_CLK_SRC                                    45
+#define AXI_CLK_SRC                                    46
+#define PCLK0_CLK_SRC                                  47
+#define PCLK1_CLK_SRC                                  48
+#define ROT_CLK_SRC                                    49
+#define VIDEO_CORE_CLK_SRC                             50
+#define VIDEO_SUBCORE0_CLK_SRC                         51
+#define VIDEO_SUBCORE1_CLK_SRC                         52
+#define VFE0_CLK_SRC                                   53
+#define VFE1_CLK_SRC                                   54
+#define MISC_AHB_CLK                                   55
+#define VIDEO_CORE_CLK                                 56
+#define VIDEO_AHB_CLK                                  57
+#define VIDEO_AXI_CLK                                  58
+#define VIDEO_MAXI_CLK                                 59
+#define VIDEO_SUBCORE0_CLK                             60
+#define VIDEO_SUBCORE1_CLK                             61
+#define MDSS_AHB_CLK                                   62
+#define MDSS_HDMI_DP_AHB_CLK                           63
+#define MDSS_AXI_CLK                                   64
+#define MDSS_PCLK0_CLK                                 65
+#define MDSS_PCLK1_CLK                                 66
+#define MDSS_MDP_CLK                                   67
+#define MDSS_MDP_LUT_CLK                               68
+#define MDSS_EXTPCLK_CLK                               69
+#define MDSS_VSYNC_CLK                                 70
+#define MDSS_HDMI_CLK                                  71
+#define MDSS_BYTE0_CLK                                 72
+#define MDSS_BYTE1_CLK                                 73
+#define MDSS_ESC0_CLK                                  74
+#define MDSS_ESC1_CLK                                  75
+#define MDSS_ROT_CLK                                   76
+#define MDSS_DP_LINK_CLK                               77
+#define MDSS_DP_LINK_INTF_CLK                          78
+#define MDSS_DP_CRYPTO_CLK                             79
+#define MDSS_DP_PIXEL_CLK                              80
+#define MDSS_DP_AUX_CLK                                        81
+#define MDSS_BYTE0_INTF_CLK                            82
+#define MDSS_BYTE1_INTF_CLK                            83
+#define CAMSS_CSI0PHYTIMER_CLK                         84
+#define CAMSS_CSI1PHYTIMER_CLK                         85
+#define CAMSS_CSI2PHYTIMER_CLK                         86
+#define CAMSS_CSI0_CLK                                 87
+#define CAMSS_CSI0_AHB_CLK                             88
+#define CAMSS_CSI0RDI_CLK                              89
+#define CAMSS_CSI0PIX_CLK                              90
+#define CAMSS_CSI1_CLK                                 91
+#define CAMSS_CSI1_AHB_CLK                             92
+#define CAMSS_CSI1RDI_CLK                              93
+#define CAMSS_CSI1PIX_CLK                              94
+#define CAMSS_CSI2_CLK                                 95
+#define CAMSS_CSI2_AHB_CLK                             96
+#define CAMSS_CSI2RDI_CLK                              97
+#define CAMSS_CSI2PIX_CLK                              98
+#define CAMSS_CSI3_CLK                                 99
+#define CAMSS_CSI3_AHB_CLK                             100
+#define CAMSS_CSI3RDI_CLK                              101
+#define CAMSS_CSI3PIX_CLK                              102
+#define CAMSS_ISPIF_AHB_CLK                            103
+#define CAMSS_CCI_CLK                                  104
+#define CAMSS_CCI_AHB_CLK                              105
+#define CAMSS_MCLK0_CLK                                        106
+#define CAMSS_MCLK1_CLK                                        107
+#define CAMSS_MCLK2_CLK                                        108
+#define CAMSS_MCLK3_CLK                                        109
+#define CAMSS_TOP_AHB_CLK                              110
+#define CAMSS_AHB_CLK                                  111
+#define CAMSS_MICRO_AHB_CLK                            112
+#define CAMSS_JPEG0_CLK                                        113
+#define CAMSS_JPEG_AHB_CLK                             114
+#define CAMSS_JPEG_AXI_CLK                             115
+#define CAMSS_VFE0_AHB_CLK                             116
+#define CAMSS_VFE1_AHB_CLK                             117
+#define CAMSS_VFE0_CLK                                 118
+#define CAMSS_VFE1_CLK                                 119
+#define CAMSS_CPP_CLK                                  120
+#define CAMSS_CPP_AHB_CLK                              121
+#define CAMSS_VFE_VBIF_AHB_CLK                         122
+#define CAMSS_VFE_VBIF_AXI_CLK                         123
+#define CAMSS_CPP_AXI_CLK                              124
+#define CAMSS_CPP_VBIF_AHB_CLK                         125
+#define CAMSS_CSI_VFE0_CLK                             126
+#define CAMSS_CSI_VFE1_CLK                             127
+#define CAMSS_VFE0_STREAM_CLK                          128
+#define CAMSS_VFE1_STREAM_CLK                          129
+#define CAMSS_CPHY_CSID0_CLK                           130
+#define CAMSS_CPHY_CSID1_CLK                           131
+#define CAMSS_CPHY_CSID2_CLK                           132
+#define CAMSS_CPHY_CSID3_CLK                           133
+#define CAMSS_CSIPHY0_CLK                              134
+#define CAMSS_CSIPHY1_CLK                              135
+#define CAMSS_CSIPHY2_CLK                              136
+#define FD_CORE_CLK                                    137
+#define FD_CORE_UAR_CLK                                        138
+#define FD_AHB_CLK                                     139
+#define MNOC_AHB_CLK                                   140
+#define BIMC_SMMU_AHB_CLK                              141
+#define BIMC_SMMU_AXI_CLK                              142
+#define MNOC_MAXI_CLK                                  143
+#define VMEM_MAXI_CLK                                  144
+#define VMEM_AHB_CLK                                   145
+
+#define SPDM_BCR                                       0
+#define SPDM_RM_BCR                                    1
+#define MISC_BCR                                       2
+#define VIDEO_TOP_BCR                                  3
+#define THROTTLE_VIDEO_BCR                             4
+#define MDSS_BCR                                       5
+#define THROTTLE_MDSS_BCR                              6
+#define CAMSS_PHY0_BCR                                 7
+#define CAMSS_PHY1_BCR                                 8
+#define CAMSS_PHY2_BCR                                 9
+#define CAMSS_CSI0_BCR                                 10
+#define CAMSS_CSI0RDI_BCR                              11
+#define CAMSS_CSI0PIX_BCR                              12
+#define CAMSS_CSI1_BCR                                 13
+#define CAMSS_CSI1RDI_BCR                              14
+#define CAMSS_CSI1PIX_BCR                              15
+#define CAMSS_CSI2_BCR                                 16
+#define CAMSS_CSI2RDI_BCR                              17
+#define CAMSS_CSI2PIX_BCR                              18
+#define CAMSS_CSI3_BCR                                 19
+#define CAMSS_CSI3RDI_BCR                              20
+#define CAMSS_CSI3PIX_BCR                              21
+#define CAMSS_ISPIF_BCR                                        22
+#define CAMSS_CCI_BCR                                  23
+#define CAMSS_TOP_BCR                                  24
+#define CAMSS_AHB_BCR                                  25
+#define CAMSS_MICRO_BCR                                        26
+#define CAMSS_JPEG_BCR                                 27
+#define CAMSS_VFE0_BCR                                 28
+#define CAMSS_VFE1_BCR                                 29
+#define CAMSS_VFE_VBIF_BCR                             30
+#define CAMSS_CPP_TOP_BCR                              31
+#define CAMSS_CPP_BCR                                  32
+#define CAMSS_CSI_VFE0_BCR                             33
+#define CAMSS_CSI_VFE1_BCR                             34
+#define CAMSS_FD_BCR                                   35
+#define THROTTLE_CAMSS_BCR                             36
+#define MNOCAHB_BCR                                    37
+#define MNOCAXI_BCR                                    38
+#define BMIC_SMMU_BCR                                  39
+#define MNOC_MAXI_BCR                                  40
+#define VMEM_BCR                                       41
+#define BTO_BCR                                                42
+
+#define VIDEO_TOP_GDSC         1
+#define VIDEO_SUBCORE0_GDSC    2
+#define VIDEO_SUBCORE1_GDSC    3
+#define MDSS_GDSC              4
+#define CAMSS_TOP_GDSC         5
+#define CAMSS_VFE0_GDSC                6
+#define CAMSS_VFE1_GDSC                7
+#define CAMSS_CPP_GDSC         8
+#define BIMC_SMMU_GDSC         9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sc7180.h b/include/dt-bindings/clock/qcom,videocc-sc7180.h
new file mode 100644 (file)
index 0000000..7acaf13
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7180_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0                             0
+#define VIDEO_CC_VCODEC0_AXI_CLK               1
+#define VIDEO_CC_VCODEC0_CORE_CLK              2
+#define VIDEO_CC_VENUS_AHB_CLK                 3
+#define VIDEO_CC_VENUS_CLK_SRC                 4
+#define VIDEO_CC_VENUS_CTL_AXI_CLK             5
+#define VIDEO_CC_VENUS_CTL_CORE_CLK            6
+#define VIDEO_CC_XO_CLK                                7
+
+/* VIDEO_CC GDSCRs */
+#define VENUS_GDSC                             0
+#define VCODEC0_GDSC                           1
+
+#endif
index a8ac4cf..e512a1c 100644 (file)
@@ -46,6 +46,7 @@
 #define CLK_PLL_VIDEO0         7
 #define CLK_PLL_PERIPH0                11
 
+#define CLK_CPUX               21
 #define CLK_BUS_MIPI_DSI       28
 #define CLK_BUS_CE             29
 #define CLK_BUS_DMA            30
index c5d1334..39878d9 100644 (file)
@@ -49,6 +49,8 @@
 
 #define CLK_PLL_VIDEO1_2X      13
 
+#define CLK_PLL_MIPI           15
+
 #define CLK_CPU                        18
 
 #define CLK_AHB1_MIPIDSI       23
index f8222b6..eb524d0 100644 (file)
@@ -43,6 +43,8 @@
 #ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
 #define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
 
+#define CLK_PLL_MIPI           13
+
 #define CLK_CPUX               18
 
 #define CLK_BUS_MIPI_DSI       23
index f9e15a2..d7337b5 100644 (file)
 #define CLK_AVS                        152
 #define CLK_HDMI               153
 #define CLK_HDMI_SLOW          154
-
+#define CLK_MBUS               155
 #define CLK_DSI_DPHY           156
 #define CLK_TVE0               157
 #define CLK_TVE1               158
diff --git a/include/dt-bindings/clock/ti-dra7-atl.h b/include/dt-bindings/clock/ti-dra7-atl.h
new file mode 100644 (file)
index 0000000..42dd416
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * This header provides constants for DRA7 ATL (Audio Tracking Logic)
+ *
+ * The constants defined in this header are used in dts files
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H
+#define _DT_BINDINGS_CLK_DRA7_ATL_H
+
+#define DRA7_ATL_WS_MCASP1_FSR         0
+#define DRA7_ATL_WS_MCASP1_FSX         1
+#define DRA7_ATL_WS_MCASP2_FSR         2
+#define DRA7_ATL_WS_MCASP2_FSX         3
+#define DRA7_ATL_WS_MCASP3_FSX         4
+#define DRA7_ATL_WS_MCASP4_FSX         5
+#define DRA7_ATL_WS_MCASP5_FSX         6
+#define DRA7_ATL_WS_MCASP6_FSX         7
+#define DRA7_ATL_WS_MCASP7_FSX         8
+#define DRA7_ATL_WS_MCASP8_FSX         9
+#define DRA7_ATL_WS_MCASP8_AHCLKX      10
+#define DRA7_ATL_WS_XREF_CLK3          11
+#define DRA7_ATL_WS_XREF_CLK0          12
+#define DRA7_ATL_WS_XREF_CLK1          13
+#define DRA7_ATL_WS_XREF_CLK2          14
+#define DRA7_ATL_WS_OSC1_X1            15
+
+#endif
diff --git a/include/dt-bindings/clock/xlnx-versal-clk.h b/include/dt-bindings/clock/xlnx-versal-clk.h
new file mode 100644 (file)
index 0000000..264d634
--- /dev/null
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Xilinx Inc.
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_VERSAL_H
+#define _DT_BINDINGS_CLK_VERSAL_H
+
+#define PMC_PLL                                        1
+#define APU_PLL                                        2
+#define RPU_PLL                                        3
+#define CPM_PLL                                        4
+#define NOC_PLL                                        5
+#define PLL_MAX                                        6
+#define PMC_PRESRC                             7
+#define PMC_POSTCLK                            8
+#define PMC_PLL_OUT                            9
+#define PPLL                                   10
+#define NOC_PRESRC                             11
+#define NOC_POSTCLK                            12
+#define NOC_PLL_OUT                            13
+#define NPLL                                   14
+#define APU_PRESRC                             15
+#define APU_POSTCLK                            16
+#define APU_PLL_OUT                            17
+#define APLL                                   18
+#define RPU_PRESRC                             19
+#define RPU_POSTCLK                            20
+#define RPU_PLL_OUT                            21
+#define RPLL                                   22
+#define CPM_PRESRC                             23
+#define CPM_POSTCLK                            24
+#define CPM_PLL_OUT                            25
+#define CPLL                                   26
+#define PPLL_TO_XPD                            27
+#define NPLL_TO_XPD                            28
+#define APLL_TO_XPD                            29
+#define RPLL_TO_XPD                            30
+#define EFUSE_REF                              31
+#define SYSMON_REF                             32
+#define IRO_SUSPEND_REF                                33
+#define USB_SUSPEND                            34
+#define SWITCH_TIMEOUT                         35
+#define RCLK_PMC                               36
+#define RCLK_LPD                               37
+#define WDT                                    38
+#define TTC0                                   39
+#define TTC1                                   40
+#define TTC2                                   41
+#define TTC3                                   42
+#define GEM_TSU                                        43
+#define GEM_TSU_LB                             44
+#define MUXED_IRO_DIV2                         45
+#define MUXED_IRO_DIV4                         46
+#define PSM_REF                                        47
+#define GEM0_RX                                        48
+#define GEM0_TX                                        49
+#define GEM1_RX                                        50
+#define GEM1_TX                                        51
+#define CPM_CORE_REF                           52
+#define CPM_LSBUS_REF                          53
+#define CPM_DBG_REF                            54
+#define CPM_AUX0_REF                           55
+#define CPM_AUX1_REF                           56
+#define QSPI_REF                               57
+#define OSPI_REF                               58
+#define SDIO0_REF                              59
+#define SDIO1_REF                              60
+#define PMC_LSBUS_REF                          61
+#define I2C_REF                                        62
+#define TEST_PATTERN_REF                       63
+#define DFT_OSC_REF                            64
+#define PMC_PL0_REF                            65
+#define PMC_PL1_REF                            66
+#define PMC_PL2_REF                            67
+#define PMC_PL3_REF                            68
+#define CFU_REF                                        69
+#define SPARE_REF                              70
+#define NPI_REF                                        71
+#define HSM0_REF                               72
+#define HSM1_REF                               73
+#define SD_DLL_REF                             74
+#define FPD_TOP_SWITCH                         75
+#define FPD_LSBUS                              76
+#define ACPU                                   77
+#define DBG_TRACE                              78
+#define DBG_FPD                                        79
+#define LPD_TOP_SWITCH                         80
+#define ADMA                                   81
+#define LPD_LSBUS                              82
+#define CPU_R5                                 83
+#define CPU_R5_CORE                            84
+#define CPU_R5_OCM                             85
+#define CPU_R5_OCM2                            86
+#define IOU_SWITCH                             87
+#define GEM0_REF                               88
+#define GEM1_REF                               89
+#define GEM_TSU_REF                            90
+#define USB0_BUS_REF                           91
+#define UART0_REF                              92
+#define UART1_REF                              93
+#define SPI0_REF                               94
+#define SPI1_REF                               95
+#define CAN0_REF                               96
+#define CAN1_REF                               97
+#define I2C0_REF                               98
+#define I2C1_REF                               99
+#define DBG_LPD                                        100
+#define TIMESTAMP_REF                          101
+#define DBG_TSTMP                              102
+#define CPM_TOPSW_REF                          103
+#define USB3_DUAL_REF                          104
+#define OUTCLK_MAX                             105
+#define REF_CLK                                        106
+#define PL_ALT_REF_CLK                         107
+#define MUXED_IRO                              108
+#define PL_EXT                                 109
+#define PL_LB                                  110
+#define MIO_50_OR_51                           111
+#define MIO_24_OR_25                           112
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq6018.h b/include/dt-bindings/reset/qcom,gcc-ipq6018.h
new file mode 100644 (file)
index 0000000..02a220a
--- /dev/null
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_6018_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_6018_H
+
+#define GCC_BLSP1_BCR                          0
+#define GCC_BLSP1_QUP1_BCR                     1
+#define GCC_BLSP1_UART1_BCR                    2
+#define GCC_BLSP1_QUP2_BCR                     3
+#define GCC_BLSP1_UART2_BCR                    4
+#define GCC_BLSP1_QUP3_BCR                     5
+#define GCC_BLSP1_UART3_BCR                    6
+#define GCC_BLSP1_QUP4_BCR                     7
+#define GCC_BLSP1_UART4_BCR                    8
+#define GCC_BLSP1_QUP5_BCR                     9
+#define GCC_BLSP1_UART5_BCR                    10
+#define GCC_BLSP1_QUP6_BCR                     11
+#define GCC_BLSP1_UART6_BCR                    12
+#define GCC_IMEM_BCR                           13
+#define GCC_SMMU_BCR                           14
+#define GCC_APSS_TCU_BCR                       15
+#define GCC_SMMU_XPU_BCR                       16
+#define GCC_PCNOC_TBU_BCR                      17
+#define GCC_SMMU_CFG_BCR                       18
+#define GCC_PRNG_BCR                           19
+#define GCC_BOOT_ROM_BCR                       20
+#define GCC_CRYPTO_BCR                         21
+#define GCC_WCSS_BCR                           22
+#define GCC_WCSS_Q6_BCR                                23
+#define GCC_NSS_BCR                            24
+#define GCC_SEC_CTRL_BCR                       25
+#define GCC_DDRSS_BCR                          26
+#define GCC_SYSTEM_NOC_BCR                     27
+#define GCC_PCNOC_BCR                          28
+#define GCC_TCSR_BCR                           29
+#define GCC_QDSS_BCR                           30
+#define GCC_DCD_BCR                            31
+#define GCC_MSG_RAM_BCR                                32
+#define GCC_MPM_BCR                            33
+#define GCC_SPDM_BCR                           34
+#define GCC_RBCPR_BCR                          35
+#define GCC_RBCPR_MX_BCR                       36
+#define GCC_TLMM_BCR                           37
+#define GCC_RBCPR_WCSS_BCR                     38
+#define GCC_USB0_PHY_BCR                       39
+#define GCC_USB3PHY_0_PHY_BCR                  40
+#define GCC_USB0_BCR                           41
+#define GCC_USB1_BCR                           42
+#define GCC_QUSB2_0_PHY_BCR                    43
+#define GCC_QUSB2_1_PHY_BCR                    44
+#define GCC_SDCC1_BCR                          45
+#define GCC_SNOC_BUS_TIMEOUT0_BCR              46
+#define GCC_SNOC_BUS_TIMEOUT1_BCR              47
+#define GCC_SNOC_BUS_TIMEOUT2_BCR              48
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR             49
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR             50
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR             51
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR             52
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR             53
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR             54
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR             55
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR             56
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR             57
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR             58
+#define GCC_UNIPHY0_BCR                                59
+#define GCC_UNIPHY1_BCR                                60
+#define GCC_CMN_12GPLL_BCR                     61
+#define GCC_QPIC_BCR                           62
+#define GCC_MDIO_BCR                           63
+#define GCC_WCSS_CORE_TBU_BCR                  64
+#define GCC_WCSS_Q6_TBU_BCR                    65
+#define GCC_USB0_TBU_BCR                       66
+#define GCC_PCIE0_TBU_BCR                      67
+#define GCC_PCIE0_BCR                          68
+#define GCC_PCIE0_PHY_BCR                      69
+#define GCC_PCIE0PHY_PHY_BCR                   70
+#define GCC_PCIE0_LINK_DOWN_BCR                        71
+#define GCC_DCC_BCR                            72
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR    73
+#define GCC_SMMU_CATS_BCR                      74
+#define GCC_UBI0_AXI_ARES                      75
+#define GCC_UBI0_AHB_ARES                      76
+#define GCC_UBI0_NC_AXI_ARES                   77
+#define GCC_UBI0_DBG_ARES                      78
+#define GCC_UBI0_CORE_CLAMP_ENABLE             79
+#define GCC_UBI0_CLKRST_CLAMP_ENABLE           80
+#define GCC_UBI0_UTCM_ARES                     81
+#define GCC_NSS_CFG_ARES                       82
+#define GCC_NSS_NOC_ARES                       83
+#define GCC_NSS_CRYPTO_ARES                    84
+#define GCC_NSS_CSR_ARES                       85
+#define GCC_NSS_CE_APB_ARES                    86
+#define GCC_NSS_CE_AXI_ARES                    87
+#define GCC_NSSNOC_CE_APB_ARES                 88
+#define GCC_NSSNOC_CE_AXI_ARES                 89
+#define GCC_NSSNOC_UBI0_AHB_ARES               90
+#define GCC_NSSNOC_SNOC_ARES                   91
+#define GCC_NSSNOC_CRYPTO_ARES                 92
+#define GCC_NSSNOC_ATB_ARES                    93
+#define GCC_NSSNOC_QOSGEN_REF_ARES             94
+#define GCC_NSSNOC_TIMEOUT_REF_ARES            95
+#define GCC_PCIE0_PIPE_ARES                    96
+#define GCC_PCIE0_SLEEP_ARES                   97
+#define GCC_PCIE0_CORE_STICKY_ARES             98
+#define GCC_PCIE0_AXI_MASTER_ARES              99
+#define GCC_PCIE0_AXI_SLAVE_ARES               100
+#define GCC_PCIE0_AHB_ARES                     101
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES       102
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES                103
+#define GCC_PPE_FULL_RESET                     104
+#define GCC_UNIPHY0_SOFT_RESET                 105
+#define GCC_UNIPHY0_XPCS_RESET                 106
+#define GCC_UNIPHY1_SOFT_RESET                 107
+#define GCC_UNIPHY1_XPCS_RESET                 108
+#define GCC_EDMA_HW_RESET                      109
+#define GCC_ADSS_BCR                           110
+#define GCC_NSS_NOC_TBU_BCR                    111
+#define GCC_NSSPORT1_RESET                     112
+#define GCC_NSSPORT2_RESET                     113
+#define GCC_NSSPORT3_RESET                     114
+#define GCC_NSSPORT4_RESET                     115
+#define GCC_NSSPORT5_RESET                     116
+#define GCC_UNIPHY0_PORT1_ARES                 117
+#define GCC_UNIPHY0_PORT2_ARES                 118
+#define GCC_UNIPHY0_PORT3_ARES                 119
+#define GCC_UNIPHY0_PORT4_ARES                 120
+#define GCC_UNIPHY0_PORT5_ARES                 121
+#define GCC_UNIPHY0_PORT_4_5_RESET             122
+#define GCC_UNIPHY0_PORT_4_RESET               123
+#define GCC_LPASS_BCR                          124
+#define GCC_UBI32_TBU_BCR                      125
+#define GCC_LPASS_TBU_BCR                      126
+#define GCC_WCSSAON_RESET                      127
+#define GCC_LPASS_Q6_AXIM_ARES                 128
+#define GCC_LPASS_Q6SS_TSCTR_1TO2_ARES         129
+#define GCC_LPASS_Q6SS_TRIG_ARES               130
+#define GCC_LPASS_Q6_ATBM_AT_ARES              131
+#define GCC_LPASS_Q6_PCLKDBG_ARES              132
+#define GCC_LPASS_CORE_AXIM_ARES               133
+#define GCC_LPASS_SNOC_CFG_ARES                        134
+#define GCC_WCSS_DBG_ARES                      135
+#define GCC_WCSS_ECAHB_ARES                    136
+#define GCC_WCSS_ACMT_ARES                     137
+#define GCC_WCSS_DBG_BDG_ARES                  138
+#define GCC_WCSS_AHB_S_ARES                    139
+#define GCC_WCSS_AXI_M_ARES                    140
+#define GCC_Q6SS_DBG_ARES                      141
+#define GCC_Q6_AHB_S_ARES                      142
+#define GCC_Q6_AHB_ARES                                143
+#define GCC_Q6_AXIM2_ARES                      144
+#define GCC_Q6_AXIM_ARES                       145
+#define GCC_UBI0_CORE_ARES                     146
+
+#endif
index 80ad521..e52ceb1 100644 (file)
@@ -186,7 +186,7 @@ bitmap_find_next_zero_area(unsigned long *map,
                                              align_mask, 0);
 }
 
-extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
+extern int bitmap_parse(const char *buf, unsigned int buflen,
                        unsigned long *dst, int nbits);
 extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
                        unsigned long *dst, int nbits);
@@ -454,12 +454,6 @@ static inline void bitmap_replace(unsigned long *dst,
                __bitmap_replace(dst, old, new, mask, nbits);
 }
 
-static inline int bitmap_parse(const char *buf, unsigned int buflen,
-                       unsigned long *maskp, int nmaskbits)
-{
-       return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
-}
-
 static inline void bitmap_next_clear_region(unsigned long *bitmap,
                                            unsigned int *rs, unsigned int *re,
                                            unsigned int end)
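
Purely as an illustrative aside (not part of the patch): a minimal sketch of calling the un-prefixed bitmap_parse() after this change. The example_parse_mask() name, the 64-bit mask size and the "ff" input string are made up for illustration.

#include <linux/bitmap.h>
#include <linux/string.h>

/* Sketch: parse a comma-separated hex mask string into a bitmap.
 * The old is_user argument of __bitmap_parse() is gone; user-space
 * buffers still go through bitmap_parse_user(). */
static int example_parse_mask(void)
{
        DECLARE_BITMAP(mask, 64);
        int err;

        err = bitmap_parse("ff", strlen("ff"), mask, 64);
        if (err)
                return err;             /* e.g. -EINVAL on malformed input */

        /* bits 0-7 of mask are now set */
        return 0;
}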
index 6c7c413..47f54b4 100644 (file)
 #  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
 #endif
 
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_PER_TYPE(type)    (sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr)                DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr)                DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
 #define BITS_TO_BYTES(nr)      DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
 
 extern unsigned int __sw_hweight8(unsigned int w);
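
For orientation only (not from the patch), a tiny sketch of what the new helpers evaluate to alongside the existing BITS_TO_LONGS(); NBITS and the array names are invented values.

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch: sizing fixed-width word arrays that must hold NBITS bits. */
#define NBITS   100

static u32 words32[BITS_TO_U32(NBITS)];           /* DIV_ROUND_UP(100, 32) == 4 */
static u64 words64[BITS_TO_U64(NBITS)];           /* DIV_ROUND_UP(100, 64) == 2 */
static unsigned long words[BITS_TO_LONGS(NBITS)]; /* 2 on 64-bit, 4 on 32-bit */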
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
new file mode 100644 (file)
index 0000000..7e18c93
--- /dev/null
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_XBC_H
+#define _LINUX_XBC_H
+/*
+ * Extra Boot Config
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* XBC tree node */
+struct xbc_node {
+       u16 next;
+       u16 child;
+       u16 parent;
+       u16 data;
+} __attribute__ ((__packed__));
+
+#define XBC_KEY                0
+#define XBC_VALUE      (1 << 15)
+/* Maximum size of boot config is 32KB - 1 */
+#define XBC_DATA_MAX   (XBC_VALUE - 1)
+
+#define XBC_NODE_MAX   1024
+#define XBC_KEYLEN_MAX 256
+#define XBC_DEPTH_MAX  16
+
+/* Node tree access raw APIs */
+struct xbc_node * __init xbc_root_node(void);
+int __init xbc_node_index(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
+const char * __init xbc_node_get_data(struct xbc_node *node);
+
+/**
+ * xbc_node_is_value() - Test whether the node is a value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a value node; return true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_value(struct xbc_node *node)
+{
+       return node->data & XBC_VALUE;
+}
+
+/**
+ * xbc_node_is_key() - Test whether the node is a key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a key node; return true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_key(struct xbc_node *node)
+{
+       return !xbc_node_is_value(node);
+}
+
+/**
+ * xbc_node_is_array() - Test whether the node is an arrayed value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is an arrayed value node.
+ */
+static inline __init bool xbc_node_is_array(struct xbc_node *node)
+{
+       return xbc_node_is_value(node) && node->next != 0;
+}
+
+/**
+ * xbc_node_is_leaf() - Test whether the node is a leaf key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a leaf key node, i.e. a key node whose child is either
+ * a value node or absent. Returns true if it is a leaf node, false if not.
+ */
+static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
+{
+       return xbc_node_is_key(node) &&
+               (!node->child || xbc_node_is_value(xbc_node_get_child(node)));
+}
+
+/* Tree-based key-value access APIs */
+struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+                                            const char *key);
+
+const char * __init xbc_node_find_value(struct xbc_node *parent,
+                                       const char *key,
+                                       struct xbc_node **vnode);
+
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+                                                struct xbc_node *leaf);
+
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+                                                struct xbc_node **leaf);
+
+/**
+ * xbc_find_value() - Find a value which matches the key
+ * @key: Search key
+ * @vnode: A container pointer of XBC value node.
+ *
+ * Search the whole XBC tree for a value whose key matches @key and return
+ * the value if found. The found value node is stored in *@vnode.
+ * Note that this can return a 0-length string and store NULL in *@vnode for
+ * a key-only (non-value) entry.
+ */
+static inline const char * __init
+xbc_find_value(const char *key, struct xbc_node **vnode)
+{
+       return xbc_node_find_value(NULL, key, vnode);
+}
+
+/**
+ * xbc_find_node() - Find a node which matches the key
+ * @key: Search key
+ *
+ * Search the whole XBC tree for a (key) node whose key matches @key and
+ * return the node if found. If not found, returns NULL.
+ */
+static inline struct xbc_node * __init xbc_find_node(const char *key)
+{
+       return xbc_node_find_child(NULL, key);
+}
+
+/**
+ * xbc_array_for_each_value() - Iterate value nodes on an array
+ * @anode: An XBC arrayed value node
+ * @value: A value
+ *
+ * Iterate array value nodes and their values, starting from @anode. This is
+ * expected to be used with xbc_find_value() and xbc_node_find_value(), so
+ * that the user can process each array entry node.
+ */
+#define xbc_array_for_each_value(anode, value)                         \
+       for (value = xbc_node_get_data(anode); anode != NULL ;          \
+            anode = xbc_node_get_next(anode),                          \
+            value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_child() - Iterate child nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate the child nodes of @parent. Each child node is stored in @child.
+ */
+#define xbc_node_for_each_child(parent, child)                         \
+       for (child = xbc_node_get_child(parent); child != NULL ;        \
+            child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_array_value() - Iterate array entries of a given key
+ * @node: An XBC node.
+ * @key: A key string searched under @node
+ * @anode: Iterated XBC node of array entry.
+ * @value: Iterated value of array entry.
+ *
+ * Iterate the array entries of the given @key under @node. Each array entry
+ * node is stored in @anode and @value. If @node doesn't have a @key node,
+ * this does nothing.
+ * Note that even if the found key node has only one value (not an array),
+ * this executes the block once. However, if the found key node has no value
+ * (a key-only node), this does nothing. So don't use this for testing
+ * key-value pair existence.
+ */
+#define xbc_node_for_each_array_value(node, key, anode, value)         \
+       for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
+            anode = xbc_node_get_next(anode),                          \
+            value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_key_value() - Iterate key-value pairs under a node
+ * @node: An XBC node.
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs under @node. Each key node and value string are
+ * stored in @knode and @value respectively.
+ */
+#define xbc_node_for_each_key_value(node, knode, value)                        \
+       for (knode = NULL, value = xbc_node_find_next_key_value(node, &knode);\
+            knode != NULL; value = xbc_node_find_next_key_value(node, &knode))
+
+/**
+ * xbc_for_each_key_value() - Iterate key-value pairs
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs in whole XBC tree. Each key node and value string
+ * are stored in @knode and @value respectively.
+ */
+#define xbc_for_each_key_value(knode, value)                           \
+       xbc_node_for_each_key_value(NULL, knode, value)
+
+/* Compose partial key */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+                       struct xbc_node *node, char *buf, size_t size);
+
+/**
+ * xbc_node_compose_key() - Compose full key string of the XBC node
+ * @node: An XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the full-length key of @node into @buf. Returns the total length
+ * of the key stored in @buf, -EINVAL if @node is NULL, or -ERANGE if the key
+ * depth exceeds the maximum depth.
+ */
+static inline int __init xbc_node_compose_key(struct xbc_node *node,
+                                             char *buf, size_t size)
+{
+       return xbc_node_compose_key_after(NULL, node, buf, size);
+}
+
+/* XBC node initializer */
+int __init xbc_init(char *buf);
+
+/* XBC cleanup data structures */
+void __init xbc_destroy_all(void);
+
+/* Debug dump functions */
+void __init xbc_debug_dump(void);
+
+#endif
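
As a rough usage sketch of the accessors declared above (not part of the patch itself): the "feature.options" key, the example_show_options() name and the pr_info() reporting are invented for illustration.

#include <linux/bootconfig.h>
#include <linux/printk.h>

/* Sketch: look up one boot-config key and walk its array of values. */
static int __init example_show_options(void)
{
        struct xbc_node *vnode;
        const char *val;

        val = xbc_find_value("feature.options", &vnode);
        if (!val)
                return -ENOENT;         /* key not present at all */

        if (vnode && xbc_node_is_array(vnode)) {
                xbc_array_for_each_value(vnode, val)
                        pr_info("option: %s\n", val);
        } else {
                /* single value, or "" with vnode == NULL for a key-only entry */
                pr_info("option: %s\n", val);
        }
        return 0;
}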
index 0067d76..35d3852 100644 (file)
@@ -25,8 +25,9 @@ struct ceph_mdsmap {
        u32 m_session_timeout;          /* seconds */
        u32 m_session_autoclose;        /* seconds */
        u64 m_max_file_size;
-       u32 m_max_mds;                  /* size of m_addr, m_state arrays */
-       int m_num_mds;
+       u32 m_max_mds;                  /* expected up:active mds number */
+       u32 m_num_active_mds;           /* actual up:active mds number */
+       u32 possible_max_rank;          /* possible max rank index */
        struct ceph_mds_info *m_info;
 
        /* which object pools file data can be stored in */
@@ -42,7 +43,7 @@ struct ceph_mdsmap {
 static inline struct ceph_entity_addr *
 ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
 {
-       if (w >= m->m_num_mds)
+       if (w >= m->possible_max_rank)
                return NULL;
        return &m->m_info[w].addr;
 }
@@ -50,14 +51,14 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
 static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
 {
        BUG_ON(w < 0);
-       if (w >= m->m_num_mds)
+       if (w >= m->possible_max_rank)
                return CEPH_MDS_STATE_DNE;
        return m->m_info[w].state;
 }
 
 static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
 {
-       if (w >= 0 && w < m->m_num_mds)
+       if (w >= 0 && w < m->possible_max_rank)
                return m->m_info[w].laggy;
        return false;
 }
index eaffbdd..5a62dbd 100644 (file)
@@ -534,6 +534,7 @@ int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
                        struct ceph_object_id *dst_oid,
                        struct ceph_object_locator *dst_oloc,
                        u32 dst_fadvise_flags,
+                       u32 truncate_seq, u64 truncate_size,
                        u8 copy_from_flags);
 
 /* watch/notify */
index 3eb0e55..59bdfd4 100644 (file)
@@ -256,6 +256,7 @@ extern const char *ceph_osd_state_name(int s);
                                                                            \
        /* tiering */                                                       \
        f(COPY_FROM,    __CEPH_OSD_OP(WR, DATA, 26),    "copy-from")        \
+       f(COPY_FROM2,   __CEPH_OSD_OP(WR, DATA, 45),    "copy-from2")       \
        f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
        f(UNDIRTY,      __CEPH_OSD_OP(WR, DATA, 28),    "undirty")          \
        f(ISDIRTY,      __CEPH_OSD_OP(RD, DATA, 29),    "isdirty")          \
@@ -446,6 +447,7 @@ enum {
        CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
                                                     * cloneid */
        CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16,     /* order with write */
+       CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ = 32,  /* send truncate_{seq,size} */
 };
 
 enum {
index caf4b9d..952ac03 100644 (file)
@@ -190,8 +190,14 @@ struct clk_duty {
  *
  * @init:      Perform platform-specific initialization magic.
 *             This is not used by any of the basic clock types.
- *             Please consider other ways of solving initialization problems
- *             before using this callback, as its use is discouraged.
+ *             This callback exists for hardware which needs to perform some
+ *             initialisation magic for CCF to get an accurate view of the
+ *             clock. It may also be used when dynamic resource allocation is
+ *             required. It shall not be used to deal with clock parameters,
+ *             such as rate or parents.
+ *             Returns 0 on success, a negative errno otherwise.
+ *
+ * @terminate:  Free any resource allocated by init.
  *
  * @debug_init:        Set up type-specific debugfs entries for this clock.  This
  *             is called once, after the debugfs directory entry for this
@@ -243,7 +249,8 @@ struct clk_ops {
                                          struct clk_duty *duty);
        int             (*set_duty_cycle)(struct clk_hw *hw,
                                          struct clk_duty *duty);
-       void            (*init)(struct clk_hw *hw);
+       int             (*init)(struct clk_hw *hw);
+       void            (*terminate)(struct clk_hw *hw);
        void            (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
 };
 
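A hypothetical provider-side sketch of the reworked callbacks: .init now returns an int so it can fail, and .terminate releases whatever .init allocated. All names below are illustrative, and the usual slab/clk-provider includes are assumed.

struct foo_clk {
	struct clk_hw hw;
	void *scratch;		/* dynamically allocated state */
};

#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)

static int foo_clk_init(struct clk_hw *hw)
{
	struct foo_clk *foo = to_foo_clk(hw);

	foo->scratch = kzalloc(64, GFP_KERNEL);
	return foo->scratch ? 0 : -ENOMEM;
}

static void foo_clk_terminate(struct clk_hw *hw)
{
	kfree(to_foo_clk(hw)->scratch);
}

static const struct clk_ops foo_clk_ops = {
	.init		= foo_clk_init,
	.terminate	= foo_clk_terminate,
	/* .enable/.disable/.recalc_rate etc. as the hardware requires */
};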
@@ -321,29 +328,119 @@ struct clk_hw {
  * struct clk_fixed_rate - fixed-rate clock
  * @hw:                handle between common and hardware-specific interfaces
  * @fixed_rate:        constant frequency of clock
+ * @fixed_accuracy: constant accuracy of clock in ppb (parts per billion)
+ * @flags:     hardware specific flags
+ *
+ * Flags:
+ * * CLK_FIXED_RATE_PARENT_ACCURACY - Use the accuracy of the parent clk
+ *                                    instead of what's set in @fixed_accuracy.
  */
 struct clk_fixed_rate {
        struct          clk_hw hw;
        unsigned long   fixed_rate;
        unsigned long   fixed_accuracy;
+       unsigned long   flags;
 };
 
-#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+#define CLK_FIXED_RATE_PARENT_ACCURACY         BIT(0)
 
 extern const struct clk_ops clk_fixed_rate_ops;
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+               struct device_node *np, const char *name,
+               const char *parent_name, const struct clk_hw *parent_hw,
+               const struct clk_parent_data *parent_data, unsigned long flags,
+               unsigned long fixed_rate, unsigned long fixed_accuracy,
+               unsigned long clk_fixed_flags);
 struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                unsigned long fixed_rate);
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               unsigned long fixed_rate);
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
-               const char *name, const char *parent_name, unsigned long flags,
-               unsigned long fixed_rate, unsigned long fixed_accuracy);
+/**
+ * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate)  \
+       __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+                                    NULL, (flags), (fixed_rate), 0, 0)
+/**
+ * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags,     \
+                                            fixed_rate)                      \
+       __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw),  \
+                                    NULL, (flags), (fixed_rate), 0, 0)
+/**
+ * clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+                                            fixed_rate)                      \
+       __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL,         \
+                                    (parent_data), (flags), (fixed_rate), 0, \
+                                    0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,      \
+                                                flags, fixed_rate,           \
+                                                fixed_accuracy)              \
+       __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name),      \
+                                    NULL, NULL, (flags), (fixed_rate),       \
+                                    (fixed_accuracy), 0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name,        \
+               parent_hw, flags, fixed_rate, fixed_accuracy)                 \
+       __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw),  \
+                                    NULL, (flags), (fixed_rate),             \
+                                    (fixed_accuracy), 0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_data(dev, name,              \
+               parent_data, flags, fixed_rate, fixed_accuracy)               \
+       __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL,         \
+                                    (parent_data), (flags),                  \
+                                    (fixed_rate), (fixed_accuracy), 0)
+
 void clk_unregister_fixed_rate(struct clk *clk);
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
-               const char *name, const char *parent_name, unsigned long flags,
-               unsigned long fixed_rate, unsigned long fixed_accuracy);
 void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
 
 void of_fixed_clk_setup(struct device_node *np);
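Since clk_hw_register_fixed_rate() and its variants are now macros over __clk_hw_register_fixed_rate(), existing by-name call sites keep compiling unchanged. A hedged sketch of the plain form (driver and clock names are made up):

static int foo_osc_probe(struct platform_device *pdev)
{
	struct clk_hw *hw;

	/* 24 MHz crystal with no parent and no framework flags */
	hw = clk_hw_register_fixed_rate(&pdev->dev, "foo_osc", NULL, 0,
					24000000);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	return 0;
}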
@@ -386,14 +483,67 @@ struct clk_gate {
 #define CLK_GATE_BIG_ENDIAN            BIT(2)
 
 extern const struct clk_ops clk_gate_ops;
-struct clk *clk_register_gate(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+               struct device_node *np, const char *name,
+               const char *parent_name, const struct clk_hw *parent_hw,
+               const struct clk_parent_data *parent_data,
+               unsigned long flags,
                void __iomem *reg, u8 bit_idx,
                u8 clk_gate_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
+struct clk *clk_register_gate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 bit_idx,
                u8 clk_gate_flags, spinlock_t *lock);
+/**
+ * clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,     \
+                            clk_gate_flags, lock)                            \
+       __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL,      \
+                              NULL, (flags), (reg), (bit_idx),               \
+                              (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg,      \
+                                      bit_idx, clk_gate_flags, lock)         \
+       __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw),        \
+                              NULL, (flags), (reg), (bit_idx),               \
+                              (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_data - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg,  \
+                                      bit_idx, clk_gate_flags, lock)         \
+       __clk_hw_register_gate((dev), NULL, (name), NULL, NULL,               \
+                              (parent_data), (flags), (reg), (bit_idx),      \
+                              (clk_gate_flags), (lock))
 void clk_unregister_gate(struct clk *clk);
 void clk_hw_unregister_gate(struct clk_hw *hw);
 int clk_gate_is_enabled(struct clk_hw *hw);
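For the gate helpers the by-name calling convention is unchanged; a minimal sketch (the register offset, bit index and names are assumptions):

static DEFINE_SPINLOCK(foo_gate_lock);

static struct clk_hw *foo_register_gate(struct device *dev, void __iomem *base)
{
	/* gate controlled by bit 3 of the register at base + 0x10 */
	return clk_hw_register_gate(dev, "foo_gate", "foo_osc", 0,
				    base + 0x10, 3, 0, &foo_gate_lock);
}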
@@ -483,24 +633,153 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
                const struct clk_div_table *table, u8 width,
                unsigned long flags);
 
-struct clk *clk_register_divider(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_divider_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
-               const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+               struct device_node *np, const char *name,
+               const char *parent_name, const struct clk_hw *parent_hw,
+               const struct clk_parent_data *parent_data, unsigned long flags,
+               void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+               const struct clk_div_table *table, spinlock_t *lock);
 struct clk *clk_register_divider_table(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_divider_flags, const struct clk_div_table *table,
                spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
-               const char *name, const char *parent_name, unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_divider_flags, const struct clk_div_table *table,
-               spinlock_t *lock);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_register_divider(dev, name, parent_name, flags, reg, shift, width, \
+                            clk_divider_flags, lock)                          \
+       clk_register_divider_table((dev), (name), (parent_name), (flags),      \
+                                  (reg), (shift), (width),                    \
+                                  (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider(dev, name, parent_name, flags, reg, shift,    \
+                               width, clk_divider_flags, lock)               \
+       __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL,   \
+                                 NULL, (flags), (reg), (shift), (width),     \
+                                 (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_hw - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, reg,   \
+                                         shift, width, clk_divider_flags,    \
+                                         lock)                               \
+       __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw),     \
+                                 NULL, (flags), (reg), (shift), (width),     \
+                                 (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_data - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_data(dev, name, parent_data, flags,    \
+                                           reg, shift, width,                \
+                                           clk_divider_flags, lock)          \
+       __clk_hw_register_divider((dev), NULL, (name), NULL, NULL,            \
+                                 (parent_data), (flags), (reg), (shift),     \
+                                 (width), (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table(dev, name, parent_name, flags, reg,     \
+                                     shift, width, clk_divider_flags, table, \
+                                     lock)                                   \
+       __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL,   \
+                                 NULL, (flags), (reg), (shift), (width),     \
+                                 (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_hw - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_hw(dev, name, parent_hw, flags,  \
+                                               reg, shift, width,            \
+                                               clk_divider_flags, table,     \
+                                               lock)                         \
+       __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw),     \
+                                 NULL, (flags), (reg), (shift), (width),     \
+                                 (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_data - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_data(dev, name, parent_data,     \
+                                                 flags, reg, shift, width,   \
+                                                 clk_divider_flags, table,   \
+                                                 lock)                       \
+       __clk_hw_register_divider((dev), NULL, (name), NULL, NULL,            \
+                                 (parent_data), (flags), (reg), (shift),     \
+                                 (width), (clk_divider_flags), (table),      \
+                                 (lock))
+
 void clk_unregister_divider(struct clk *clk);
 void clk_hw_unregister_divider(struct clk_hw *hw);
 
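A sketch of the by-name divider macro, which now expands to __clk_hw_register_divider() with a NULL table (all names and the register layout are assumed):

static struct clk_hw *foo_register_div(struct device *dev, void __iomem *base,
				       spinlock_t *lock)
{
	/* 4-bit divider field at bits [7:4] of the register at base + 0x14 */
	return clk_hw_register_divider(dev, "foo_div", "foo_gate", 0,
				       base + 0x14, 4, 4, 0, lock);
}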
@@ -555,28 +834,48 @@ struct clk_mux {
 extern const struct clk_ops clk_mux_ops;
 extern const struct clk_ops clk_mux_ro_ops;
 
-struct clk *clk_register_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_mux_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u8 width,
-               u8 clk_mux_flags, spinlock_t *lock);
-
-struct clk *clk_register_mux_table(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u32 mask,
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+               const char *name, u8 num_parents,
+               const char * const *parent_names,
+               const struct clk_hw **parent_hws,
+               const struct clk_parent_data *parent_data,
+               unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
+struct clk *clk_register_mux_table(struct device *dev, const char *name,
                const char * const *parent_names, u8 num_parents,
-               unsigned long flags,
-               void __iomem *reg, u8 shift, u32 mask,
+               unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 
+#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg,    \
+                        shift, width, clk_mux_flags, lock)                   \
+       clk_register_mux_table((dev), (name), (parent_names), (num_parents),  \
+                              (flags), (reg), (shift), BIT((width)) - 1,     \
+                              (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_table(dev, name, parent_names, num_parents,              \
+                                 flags, reg, shift, mask, clk_mux_flags,     \
+                                 table, lock)                                \
+       __clk_hw_register_mux((dev), NULL, (name), (num_parents),             \
+                             (parent_names), NULL, NULL, (flags), (reg),     \
+                             (shift), (mask), (clk_mux_flags), (table),      \
+                             (lock))
+#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+                           shift, width, clk_mux_flags, lock)                \
+       __clk_hw_register_mux((dev), NULL, (name), (num_parents),             \
+                             (parent_names), NULL, NULL, (flags), (reg),     \
+                             (shift), BIT((width)) - 1, (clk_mux_flags),     \
+                             NULL, (lock))
+#define clk_hw_register_mux_hws(dev, name, parent_hws, num_parents, flags,    \
+                               reg, shift, width, clk_mux_flags, lock)       \
+       __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL,       \
+                             (parent_hws), NULL, (flags), (reg), (shift),    \
+                             BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data(dev, name, parent_data, num_parents,  \
+                                       flags, reg, shift, width,             \
+                                       clk_mux_flags, lock)                  \
+       __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+                             (parent_data), (flags), (reg), (shift),         \
+                             BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+
 int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
                         unsigned int val);
 unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
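And a sketch of the by-name mux macro, whose mask is derived from the width as BIT(width) - 1 (the parent list and register layout are assumptions):

static const char * const foo_mux_parents[] = {
	"foo_osc", "foo_pll", "foo_ext",
};

static struct clk_hw *foo_register_mux(struct device *dev, void __iomem *base,
				       spinlock_t *lock)
{
	/* 2-bit selector at bits [1:0] of the register at base + 0x18 */
	return clk_hw_register_mux(dev, "foo_mux", foo_mux_parents,
				   ARRAY_SIZE(foo_mux_parents), 0,
				   base + 0x18, 0, 2, 0, lock);
}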
@@ -743,6 +1042,12 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
                struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
                struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
                unsigned long flags);
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+               const struct clk_parent_data *parent_data, int num_parents,
+               struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+               struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+               struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+               unsigned long flags);
 void clk_unregister_composite(struct clk *clk);
 struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
                const char * const *parent_names, int num_parents,
@@ -750,45 +1055,14 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
                struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
                struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
                unsigned long flags);
-void clk_hw_unregister_composite(struct clk_hw *hw);
-
-/**
- * struct clk_gpio - gpio gated clock
- *
- * @hw:                handle between common and hardware-specific interfaces
- * @gpiod:     gpio descriptor
- *
- * Clock with a gpio control for enabling and disabling the parent clock
- * or switching between two parents by asserting or deasserting the gpio.
- *
- * Implements .enable, .disable and .is_enabled or
- * .get_parent, .set_parent and .determine_rate depending on which clk_ops
- * is used.
- */
-struct clk_gpio {
-       struct clk_hw   hw;
-       struct gpio_desc *gpiod;
-};
-
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
-extern const struct clk_ops clk_gpio_gate_ops;
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, struct gpio_desc *gpiod,
-               unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, struct gpio_desc *gpiod,
-               unsigned long flags);
-void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
-
-extern const struct clk_ops clk_gpio_mux_ops;
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-               unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+               const char *name,
+               const struct clk_parent_data *parent_data, int num_parents,
+               struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+               struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+               struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
                unsigned long flags);
-void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
+void clk_hw_unregister_composite(struct clk_hw *hw);
 
 struct clk *clk_register(struct device *dev, struct clk_hw *hw);
 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
index 18b7b95..7fd6a1f 100644 (file)
@@ -627,6 +627,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
  * @clk: clock source
  * @rate: desired clock rate in Hz
  *
+ * Updating the rate starts at the top-most affected clock and then
+ * walks the tree down to the bottom-most clock that needs updating.
+ *
  * Returns success (0) or negative errno.
  */
 int clk_set_rate(struct clk *clk, unsigned long rate);
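The added note only documents the propagation order; the consumer call itself is unchanged. A minimal, assumed consumer sketch:

static int foo_set_pixel_clock(struct device *dev, unsigned long hz)
{
	struct clk *clk = devm_clk_get(dev, "pix");	/* "pix" is illustrative */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_set_rate(clk, hz);
}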
index 78a73eb..d5cc885 100644 (file)
@@ -663,9 +663,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
  */
 static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
 {
-       unsigned int len = strchrnul(buf, '\n') - buf;
-
-       return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+       return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
index bf9b6ca..3d013de 100644 (file)
@@ -83,7 +83,7 @@ struct dentry *debugfs_create_automount(const char *name,
                                        void *data);
 
 void debugfs_remove(struct dentry *dentry);
-void debugfs_remove_recursive(struct dentry *dentry);
+#define debugfs_remove_recursive debugfs_remove
 
 const struct file_operations *debugfs_real_fops(const struct file *filp);
 
index ffcc772..dc4fd8a 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/fcntl.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -40,6 +42,13 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                  __u64 *cnt);
 
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+       return this_cpu_read(eventfd_wake_count);
+}
+
 #else /* CONFIG_EVENTFD */
 
 /*
@@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
        return -ENOSYS;
 }
 
+static inline bool eventfd_signal_count(void)
+{
+       return false;
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */
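A hedged sketch of how the new helper is presumably meant to be used, based on the per-CPU wake counter: a signalling path can detect that it is already running inside an eventfd wakeup and back off instead of recursing. The deferral strategy and the function name are assumptions.

static void foo_notify(struct eventfd_ctx *ctx)
{
	if (eventfd_signal_count()) {
		/* already inside an eventfd wakeup: defer to avoid recursion */
		return;
	}

	eventfd_signal(ctx, 1);
}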
index e41ad9e..1b9549d 100644 (file)
@@ -89,6 +89,7 @@ enum pm_ret_status {
        XST_PM_INVALID_NODE,
        XST_PM_DOUBLE_REQ,
        XST_PM_ABORT_SUSPEND,
+       XST_PM_MULT_USER = 2008,
 };
 
 enum pm_ioctl_id {
@@ -107,6 +108,7 @@ enum pm_query_id {
        PM_QID_CLOCK_GET_PARENTS,
        PM_QID_CLOCK_GET_ATTRIBUTES,
        PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+       PM_QID_CLOCK_GET_MAX_DIVISOR,
 };
 
 enum zynqmp_pm_reset_action {
index 41584f5..6eae91c 100644 (file)
@@ -1575,7 +1575,6 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
        inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
 }
 
-extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran);
 extern struct timespec64 current_time(struct inode *inode);
 
 /*
@@ -2078,6 +2077,18 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
        };
 }
 
+static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+                              struct file *filp)
+{
+       *kiocb = (struct kiocb) {
+               .ki_filp = filp,
+               .ki_flags = kiocb_src->ki_flags,
+               .ki_hint = kiocb_src->ki_hint,
+               .ki_ioprio = kiocb_src->ki_ioprio,
+               .ki_pos = kiocb_src->ki_pos,
+       };
+}
+
 /*
  * Inode state bits.  Protected by inode->i_lock
  *
@@ -3108,6 +3119,10 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
                rwf_t flags);
 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
                rwf_t flags);
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+                          struct iov_iter *iter);
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+                           struct iov_iter *iter);
 
 /* fs/block_dev.c */
 extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
@@ -3303,6 +3318,8 @@ extern int simple_unlink(struct inode *, struct dentry *);
 extern int simple_rmdir(struct inode *, struct dentry *);
 extern int simple_rename(struct inode *, struct dentry *,
                         struct inode *, struct dentry *, unsigned int);
+extern void simple_recursive_removal(struct dentry *,
+                              void (*callback)(struct dentry *));
 extern int noop_fsync(struct file *, loff_t, loff_t, int);
 extern int noop_set_page_dirty(struct page *page);
 extern void noop_invalidatepage(struct page *page, unsigned int offset,
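A sketch of how kiocb_clone() and the new vfs_iocb_iter_read() helper could combine in a stacked-filesystem read path (the structure and names are assumed, not taken from any particular filesystem):

static ssize_t foo_passthrough_read(struct kiocb *iocb, struct iov_iter *iter,
				    struct file *lower_file)
{
	struct kiocb lower_iocb;
	ssize_t ret;

	/* mirror flags, hint, priority and position onto the lower file */
	kiocb_clone(&lower_iocb, iocb, lower_file);

	ret = vfs_iocb_iter_read(lower_file, &lower_iocb, iter);
	if (ret >= 0)
		iocb->ki_pos = lower_iocb.ki_pos;

	return ret;
}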
index 6d8bf4b..4a16b39 100644 (file)
 #define VTD_STRIDE_SHIFT        (9)
 #define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)
 
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
+#define DMA_PTE_READ           BIT_ULL(0)
+#define DMA_PTE_WRITE          BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE     BIT_ULL(7)
+#define DMA_PTE_SNP            BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT     BIT_ULL(0)
+#define DMA_FL_PTE_XD          BIT_ULL(63)
 
 #define CONTEXT_TT_MULTI_LEVEL 0
 #define CONTEXT_TT_DEV_IOTLB   1
@@ -435,8 +438,10 @@ enum {
 
 #define VTD_FLAG_TRANS_PRE_ENABLED     (1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+#define VTD_FLAG_SVM_CAPABLE           (1 << 2)
 
 extern int intel_iommu_sm;
+extern spinlock_t device_domain_lock;
 
 #define sm_supported(iommu)    (intel_iommu_sm && ecap_smts((iommu)->ecap))
 #define pasid_supported(iommu) (sm_supported(iommu) &&                 \
@@ -609,10 +614,11 @@ static inline void dma_clear_pte(struct dma_pte *pte)
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
-       return pte->val & VTD_PAGE_MASK;
+       return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #else
        /* Must have a full atomic 64-bit read */
-       return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+       return  __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+                       VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #endif
 }
 
@@ -645,6 +651,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                          unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
                        u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+                    unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
@@ -656,9 +664,10 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
                                     void *data), void *data);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct dmar_domain *find_domain(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_svm_init(struct intel_iommu *iommu);
+extern void intel_svm_check(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
 
@@ -686,6 +695,8 @@ struct intel_svm {
 };
 
 extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
index ee21eed..53d53c6 100644 (file)
@@ -83,12 +83,16 @@ struct io_pgtable_cfg {
         * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
         *      on unmap, for DMA domains using the flush queue mechanism for
         *      delayed invalidation.
+        *
+        * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
+        *      for use in the upper half of a split address space.
         */
        #define IO_PGTABLE_QUIRK_ARM_NS         BIT(0)
        #define IO_PGTABLE_QUIRK_NO_PERMS       BIT(1)
        #define IO_PGTABLE_QUIRK_TLBI_ON_MAP    BIT(2)
        #define IO_PGTABLE_QUIRK_ARM_MTK_EXT    BIT(3)
        #define IO_PGTABLE_QUIRK_NON_STRICT     BIT(4)
+       #define IO_PGTABLE_QUIRK_ARM_TTBR1      BIT(5)
        unsigned long                   quirks;
        unsigned long                   pgsize_bitmap;
        unsigned int                    ias;
@@ -100,18 +104,33 @@ struct io_pgtable_cfg {
        /* Low-level data specific to the table format */
        union {
                struct {
-                       u64     ttbr[2];
-                       u64     tcr;
+                       u64     ttbr;
+                       struct {
+                               u32     ips:3;
+                               u32     tg:2;
+                               u32     sh:2;
+                               u32     orgn:2;
+                               u32     irgn:2;
+                               u32     tsz:6;
+                       }       tcr;
                        u64     mair;
                } arm_lpae_s1_cfg;
 
                struct {
                        u64     vttbr;
-                       u64     vtcr;
+                       struct {
+                               u32     ps:3;
+                               u32     tg:2;
+                               u32     sh:2;
+                               u32     orgn:2;
+                               u32     irgn:2;
+                               u32     sl:2;
+                               u32     tsz:6;
+                       }       vtcr;
                } arm_lpae_s2_cfg;
 
                struct {
-                       u32     ttbr[2];
+                       u32     ttbr;
                        u32     tcr;
                        u32     nmrr;
                        u32     prrr;
index f2223cb..d1b5f4d 100644 (file)
@@ -246,9 +246,10 @@ struct iommu_iotlb_gather {
  * @sva_get_pasid: Get PASID associated to a SVA handle
  * @page_response: handle page request response
  * @cache_invalidate: invalidate translation caches
- * @pgsize_bitmap: bitmap of all possible supported page sizes
  * @sva_bind_gpasid: bind guest pasid and mm
  * @sva_unbind_gpasid: unbind guest pasid and mm
+ * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @owner: Driver module providing these ops
  */
 struct iommu_ops {
        bool (*capable)(enum iommu_cap);
@@ -318,6 +319,7 @@ struct iommu_ops {
        int (*sva_unbind_gpasid)(struct device *dev, int pasid);
 
        unsigned long pgsize_bitmap;
+       struct module *owner;
 };
 
 /**
@@ -386,12 +388,19 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu);
 int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
 
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
-                                       const struct iommu_ops *ops)
+static inline void __iommu_device_set_ops(struct iommu_device *iommu,
+                                         const struct iommu_ops *ops)
 {
        iommu->ops = ops;
 }
 
+#define iommu_device_set_ops(iommu, ops)                               \
+do {                                                                   \
+       struct iommu_ops *__ops = (struct iommu_ops *)(ops);            \
+       __ops->owner = THIS_MODULE;                                     \
+       __iommu_device_set_ops(iommu, __ops);                           \
+} while (0)
+
 static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
                                           struct fwnode_handle *fwnode)
 {
@@ -456,6 +465,8 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
 
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
+extern void generic_iommu_put_resv_regions(struct device *dev,
+                                          struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern int iommu_request_dma_domain_for_dev(struct device *dev);
 extern void iommu_set_default_passthrough(bool cmd_line);
@@ -570,6 +581,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
  * @ops: ops for this device's IOMMU
  * @iommu_fwnode: firmware handle for this device's IOMMU
  * @iommu_priv: IOMMU driver private data for this device
+ * @num_pasid_bits: number of PASID bits supported by this device
  * @num_ids: number of associated device IDs
  * @ids: IDs which this device may present to the IOMMU
  */
@@ -578,6 +590,7 @@ struct iommu_fwspec {
        struct fwnode_handle    *iommu_fwnode;
        void                    *iommu_priv;
        u32                     flags;
+       u32                     num_pasid_bits;
        unsigned int            num_ids;
        u32                     ids[1];
 };
index a36bdcb..2ca9b70 100644 (file)
@@ -1226,6 +1226,7 @@ struct pci_bits {
 };
 
 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_shutdown_one(struct pci_dev *pdev);
 extern void ata_pci_remove_one(struct pci_dev *pdev);
 
 #ifdef CONFIG_PM
index ffa6ad1..f4d5915 100644 (file)
@@ -96,8 +96,8 @@ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long pfn, unsigned long nr_pages,
                        int online_type, int nid);
-extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
-       unsigned long *valid_start, unsigned long *valid_end);
+extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
+                                        unsigned long end_pfn);
 extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
                                                unsigned long end_pfn);
 
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
deleted file mode 100644 (file)
index 61c2875..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ChromeOS EC multi-function device
- *
- * Copyright (C) 2012 Google, Inc
- */
-
-#ifndef __LINUX_MFD_CROS_EC_H
-#define __LINUX_MFD_CROS_EC_H
-
-#include <linux/device.h>
-
-/**
- * struct cros_ec_dev - ChromeOS EC device entry point.
- * @class_dev: Device structure used in sysfs.
- * @ec_dev: cros_ec_device structure to talk to the physical device.
- * @dev: Pointer to the platform device.
- * @debug_info: cros_ec_debugfs structure for debugging information.
- * @has_kb_wake_angle: True if at least 2 accelerometer are connected to the EC.
- * @cmd_offset: Offset to apply for each command.
- * @features: Features supported by the EC.
- */
-struct cros_ec_dev {
-       struct device class_dev;
-       struct cros_ec_device *ec_dev;
-       struct device *dev;
-       struct cros_ec_debugfs *debug_info;
-       bool has_kb_wake_angle;
-       u16 cmd_offset;
-       u32 features[2];
-};
-
-#define to_cros_ec_dev(dev)  container_of(dev, struct cros_ec_dev, class_dev)
-
-#endif /* __LINUX_MFD_CROS_EC_H */
index 73a044e..52269e5 100644 (file)
@@ -2182,12 +2182,6 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
                                        struct mminit_pfnnid_cache *state);
 #endif
 
-#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
-void zero_resv_unavail(void);
-#else
-static inline void zero_resv_unavail(void) {}
-#endif
-
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
                enum memmap_context, struct vmem_altmap *);
@@ -2535,6 +2529,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn);
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+                       pfn_t pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
                unsigned long addr, pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
index e87bb86..c28911c 100644 (file)
@@ -312,7 +312,12 @@ struct vm_area_struct {
        /* Second cache line starts here. */
 
        struct mm_struct *vm_mm;        /* The address space we belong to. */
-       pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
+
+       /*
+        * Access permissions of this VMA.
+        * See vmf_insert_mixed_prot() for discussion.
+        */
+       pgprot_t vm_page_prot;
        unsigned long vm_flags;         /* Flags, see mm.h. */
 
        /*
index c2bc309..462f687 100644 (file)
@@ -1379,6 +1379,16 @@ static inline int pfn_present(unsigned long pfn)
        return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
 }
 
+static inline unsigned long next_present_section_nr(unsigned long section_nr)
+{
+       while (++section_nr <= __highest_present_section_nr) {
+               if (present_section_nr(section_nr))
+                       return section_nr;
+       }
+
+       return -1;
+}
+
 /*
  * These are _only_ used during initialisation, therefore they
  * can use __initdata ...  They could have names to indicate
index 6ec82e9..b1cb6b7 100644 (file)
@@ -8,16 +8,19 @@ struct mm_walk;
 
 /**
  * mm_walk_ops - callbacks for walk_page_range
- * @pud_entry:         if set, called for each non-empty PUD (2nd-level) entry
- *                     this handler should only handle pud_trans_huge() puds.
- *                     the pmd_entry or pte_entry callbacks will be used for
- *                     regular PUDs.
- * @pmd_entry:         if set, called for each non-empty PMD (3rd-level) entry
+ * @pgd_entry:         if set, called for each non-empty PGD (top-level) entry
+ * @p4d_entry:         if set, called for each non-empty P4D entry
+ * @pud_entry:         if set, called for each non-empty PUD entry
+ * @pmd_entry:         if set, called for each non-empty PMD entry
  *                     this handler is required to be able to handle
  *                     pmd_trans_huge() pmds.  They may simply choose to
  *                     split_huge_page() instead of handling it explicitly.
- * @pte_entry:         if set, called for each non-empty PTE (4th-level) entry
- * @pte_hole:          if set, called for each hole at all levels
+ * @pte_entry:         if set, called for each non-empty PTE (lowest-level)
+ *                     entry
+ * @pte_hole:          if set, called for each hole at all levels,
+ *                     depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD,
+ *                     4:PTE. Any folded depths (where PTRS_PER_P?D is equal
+ *                     to 1) are skipped.
  * @hugetlb_entry:     if set, called for each hugetlb entry
  * @test_walk:         caller specific callback function to determine whether
  *                     we walk over the current vma or not. Returning 0 means
@@ -27,8 +30,15 @@ struct mm_walk;
  * @pre_vma:            if set, called before starting walk on a non-null vma.
  * @post_vma:           if set, called after a walk on a non-null vma, provided
  *                      that @pre_vma and the vma walk succeeded.
+ *
+ * p?d_entry callbacks are called even if those levels are folded on a
+ * particular architecture/configuration.
  */
 struct mm_walk_ops {
+       int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
+                        unsigned long next, struct mm_walk *walk);
+       int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
+                        unsigned long next, struct mm_walk *walk);
        int (*pud_entry)(pud_t *pud, unsigned long addr,
                         unsigned long next, struct mm_walk *walk);
        int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
@@ -36,7 +46,7 @@ struct mm_walk_ops {
        int (*pte_entry)(pte_t *pte, unsigned long addr,
                         unsigned long next, struct mm_walk *walk);
        int (*pte_hole)(unsigned long addr, unsigned long next,
-                       struct mm_walk *walk);
+                       int depth, struct mm_walk *walk);
        int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
                             unsigned long addr, unsigned long next,
                             struct mm_walk *walk);
@@ -47,11 +57,27 @@ struct mm_walk_ops {
        void (*post_vma)(struct mm_walk *walk);
 };
 
+/*
+ * Action for pud_entry / pmd_entry callbacks.
+ * ACTION_SUBTREE is the default
+ */
+enum page_walk_action {
+       /* Descend to next level, splitting huge pages if needed and possible */
+       ACTION_SUBTREE = 0,
+       /* Continue to next entry at this level (ignoring any subtree) */
+       ACTION_CONTINUE = 1,
+       /* Call again for this entry */
+       ACTION_AGAIN = 2
+};
+
 /**
  * mm_walk - walk_page_range data
  * @ops:       operation to call during the walk
  * @mm:                mm_struct representing the target process of page table walk
+ * @pgd:       pointer to PGD; only valid with no_vma (otherwise set to NULL)
  * @vma:       vma currently walked (NULL if walking outside vmas)
+ * @action:    next action to perform (see enum page_walk_action)
+ * @no_vma:    walk ignoring vmas (vma will always be NULL)
  * @private:   private data for callbacks' usage
  *
  * (see the comment on walk_page_range() for more details)
@@ -59,13 +85,20 @@ struct mm_walk_ops {
 struct mm_walk {
        const struct mm_walk_ops *ops;
        struct mm_struct *mm;
+       pgd_t *pgd;
        struct vm_area_struct *vma;
+       enum page_walk_action action;
+       bool no_vma;
        void *private;
 };
 
 int walk_page_range(struct mm_struct *mm, unsigned long start,
                unsigned long end, const struct mm_walk_ops *ops,
                void *private);
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+                         unsigned long end, const struct mm_walk_ops *ops,
+                         pgd_t *pgd,
+                         void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
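A minimal sketch of the callback interface documented above: count the non-empty PTEs in a range of an mm. Only walk_page_range() and the pte_entry callback are used; the mmap_sem locking shown follows this kernel's convention and the helper names are made up.

static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (!pte_none(*pte))
		(*count)++;

	return 0;
}

static const struct mm_walk_ops count_walk_ops = {
	.pte_entry = count_pte_entry,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	down_read(&mm->mmap_sem);
	walk_page_range(mm, start, end, &count_walk_ops, &count);
	up_read(&mm->mmap_sem);

	return count;
}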
index 5d62e78..d08f086 100644 (file)
@@ -33,6 +33,9 @@ void pci_disable_pasid(struct pci_dev *pdev);
 int pci_pasid_features(struct pci_dev *pdev);
 int pci_max_pasids(struct pci_dev *pdev);
 #else /* CONFIG_PCI_PASID */
+static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
+{ return -EINVAL; }
+static inline void pci_disable_pasid(struct pci_dev *pdev) { }
 static inline int pci_pasid_features(struct pci_dev *pdev)
 { return -EINVAL; }
 static inline int pci_max_pasids(struct pci_dev *pdev)
index a6fabd8..176bfbd 100644 (file)
  * Declaration/definition used for per-CPU variables that should be accessed
  * as decrypted when memory encryption is enabled in the guest.
  */
-#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT)
-
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 #define DECLARE_PER_CPU_DECRYPTED(type, name)                          \
        DECLARE_PER_CPU_SECTION(type, name, "..decrypted")
 
index 6d4c22a..cf65763 100644 (file)
@@ -582,7 +582,7 @@ struct swevent_hlist {
 #define PERF_ATTACH_ITRACE     0x10
 
 struct perf_cgroup;
-struct ring_buffer;
+struct perf_buffer;
 
 struct pmu_event_list {
        raw_spinlock_t          lock;
@@ -694,7 +694,7 @@ struct perf_event {
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
 
-       struct ring_buffer              *rb;
+       struct perf_buffer              *rb;
        struct list_head                rb_entry;
        unsigned long                   rcu_batches;
        int                             rcu_pending;
@@ -854,7 +854,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
        struct perf_event               *event;
-       struct ring_buffer              *rb;
+       struct perf_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
        u64                             aux_flags;
index 30098a5..ba59147 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 
-#include <linux/mfd/cros_ec.h>
 #include <linux/platform_data/cros_ec_commands.h>
 
 #define CROS_EC_DEV_NAME       "cros_ec"
@@ -185,9 +184,27 @@ struct cros_ec_platform {
        u16 cmd_offset;
 };
 
-int cros_ec_suspend(struct cros_ec_device *ec_dev);
+/**
+ * struct cros_ec_dev - ChromeOS EC device entry point.
+ * @class_dev: Device structure used in sysfs.
+ * @ec_dev: cros_ec_device structure to talk to the physical device.
+ * @dev: Pointer to the platform device.
+ * @debug_info: cros_ec_debugfs structure for debugging information.
+ * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
+ * @cmd_offset: Offset to apply for each command.
+ * @features: Features supported by the EC.
+ */
+struct cros_ec_dev {
+       struct device class_dev;
+       struct cros_ec_device *ec_dev;
+       struct device *dev;
+       struct cros_ec_debugfs *debug_info;
+       bool has_kb_wake_angle;
+       u16 cmd_offset;
+       u32 features[2];
+};
 
-int cros_ec_resume(struct cros_ec_device *ec_dev);
+#define to_cros_ec_dev(dev)  container_of(dev, struct cros_ec_dev, class_dev)
 
 int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
                       struct cros_ec_command *msg);
@@ -201,10 +218,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
 int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
                            struct cros_ec_command *msg);
 
-int cros_ec_register(struct cros_ec_device *ec_dev);
-
-int cros_ec_unregister(struct cros_ec_device *ec_dev);
-
 int cros_ec_query_all(struct cros_ec_device *ec_dev);
 
 int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
@@ -217,8 +230,6 @@ int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
 
 int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
 
-bool cros_ec_handle_event(struct cros_ec_device *ec_dev);
-
 /**
  * cros_ec_get_time_ns() - Return time in ns.
  *
index 0640be5..3dfa926 100644 (file)
@@ -12,6 +12,21 @@ struct proc_dir_entry;
 struct seq_file;
 struct seq_operations;
 
+struct proc_ops {
+       int     (*proc_open)(struct inode *, struct file *);
+       ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *);
+       ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *);
+       loff_t  (*proc_lseek)(struct file *, loff_t, int);
+       int     (*proc_release)(struct inode *, struct file *);
+       __poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
+       long    (*proc_ioctl)(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+       long    (*proc_compat_ioctl)(struct file *, unsigned int, unsigned long);
+#endif
+       int     (*proc_mmap)(struct file *, struct vm_area_struct *);
+       unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+};
+
 #ifdef CONFIG_PROC_FS
 
 typedef int (*proc_write_t)(struct file *, char *, size_t);
@@ -43,10 +58,10 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
  
 extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
                                               struct proc_dir_entry *,
-                                              const struct file_operations *,
+                                              const struct proc_ops *,
                                               void *);
 
-struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops);
+struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
 extern void proc_set_size(struct proc_dir_entry *, loff_t);
 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
 extern void *PDE_DATA(const struct inode *);
@@ -108,8 +123,8 @@ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
 #define proc_create_seq(name, mode, parent, ops) ({NULL;})
 #define proc_create_single(name, mode, parent, show) ({NULL;})
 #define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
-#define proc_create(name, mode, parent, proc_fops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;})
+#define proc_create(name, mode, parent, proc_ops) ({NULL;})
+#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;})
 
 static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
 static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
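
For reference, a minimal sketch of the conversion this interface asks of callers: the same callbacks move from a struct file_operations into the new struct proc_ops, using the proc_-prefixed field names. All names below are illustrative.

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>

static const char example_msg[] = "hello from procfs\n";

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, example_msg,
				       sizeof(example_msg) - 1);
}

/* previously: a struct file_operations with .read and .llseek */
static const struct proc_ops example_proc_ops = {
	.proc_read	= example_read,
	.proc_lseek	= default_llseek,
};

static int __init example_proc_init(void)
{
	proc_create("example", 0444, NULL, &example_proc_ops);
	return 0;
}
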
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
new file mode 100644 (file)
index 0000000..a67065c
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PTDUMP_H
+#define _LINUX_PTDUMP_H
+
+#include <linux/mm_types.h>
+
+struct ptdump_range {
+       unsigned long start;
+       unsigned long end;
+};
+
+struct ptdump_state {
+       /* level is 0:PGD to 4:PTE, or -1 if unknown */
+       void (*note_page)(struct ptdump_state *st, unsigned long addr,
+                         int level, unsigned long val);
+       const struct ptdump_range *range;
+};
+
+void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd);
+
+#endif /* _LINUX_PTDUMP_H */
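
A minimal sketch of a ptdump_state user, under a couple of assumptions: that an empty range terminates the range table and that callers pass the mm's own pgd (here init_mm), as the generic walker does. Names and the chosen range are illustrative.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/ptdump.h>

struct example_ptdump {
	struct ptdump_state ptdump;
	unsigned long pte_count;
};

static void example_note_page(struct ptdump_state *st, unsigned long addr,
			      int level, unsigned long val)
{
	struct example_ptdump *ex =
		container_of(st, struct example_ptdump, ptdump);

	if (level == 4 && val)	/* 4 == PTE, per the comment above */
		ex->pte_count++;
}

static const struct ptdump_range example_ranges[] = {
	{ PAGE_OFFSET, ~0UL },	/* illustrative: the kernel direct map */
	{ 0, 0 },		/* assumed terminator */
};

static void example_ptdump_walk(void)
{
	struct example_ptdump ex = {
		.ptdump = {
			.note_page	= example_note_page,
			.range		= example_ranges,
		},
	};

	ptdump_walk_pgd(&ex.ptdump, &init_mm, init_mm.pgd);
	pr_info("present PTE-level entries: %lu\n", ex.pte_count);
}
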
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
new file mode 100644 (file)
index 0000000..b47416f
--- /dev/null
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef _MTK_SCP_H
+#define _MTK_SCP_H
+
+#include <linux/platform_device.h>
+
+typedef void (*scp_ipi_handler_t) (void *data,
+                                  unsigned int len,
+                                  void *priv);
+struct mtk_scp;
+
+/**
+ * enum ipi_id - the id of inter-processor interrupt
+ *
+ * @SCP_IPI_INIT:       The interrupt from SCP notifies the kernel that
+ *                      SCP initialization has completed.
+ *                      SCP_IPI_INIT is sent from SCP when the firmware is
+ *                      loaded. The AP doesn't need to send an SCP_IPI_INIT
+ *                      command to SCP.
+ *                      For other IPI below, AP should send the request
+ *                      to SCP to trigger the interrupt.
+ * @SCP_IPI_MAX:        The maximum IPI number
+ */
+
+enum scp_ipi_id {
+       SCP_IPI_INIT = 0,
+       SCP_IPI_VDEC_H264,
+       SCP_IPI_VDEC_VP8,
+       SCP_IPI_VDEC_VP9,
+       SCP_IPI_VENC_H264,
+       SCP_IPI_VENC_VP8,
+       SCP_IPI_MDP_INIT,
+       SCP_IPI_MDP_DEINIT,
+       SCP_IPI_MDP_FRAME,
+       SCP_IPI_DIP,
+       SCP_IPI_ISP_CMD,
+       SCP_IPI_ISP_FRAME,
+       SCP_IPI_FD_CMD,
+       SCP_IPI_CROS_HOST_CMD,
+       SCP_IPI_NS_SERVICE = 0xFF,
+       SCP_IPI_MAX = 0x100,
+};
+
+struct mtk_scp *scp_get(struct platform_device *pdev);
+void scp_put(struct mtk_scp *scp);
+
+struct device *scp_get_device(struct mtk_scp *scp);
+struct rproc *scp_get_rproc(struct mtk_scp *scp);
+
+int scp_ipi_register(struct mtk_scp *scp, u32 id, scp_ipi_handler_t handler,
+                    void *priv);
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id);
+
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+                unsigned int wait);
+
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp);
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp);
+
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr);
+
+#endif /* _MTK_SCP_H */
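
A hedged sketch of a client of this API: the probe path acquires the SCP handle, registers an IPI handler, and sends one message. The IPI id, payload, timeout unit (assumed to be milliseconds), and the assumption that scp_get() returns NULL while the SCP is unavailable are illustrative.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/mtk_scp.h>

static void example_ipi_handler(void *data, unsigned int len, void *priv)
{
	pr_info("SCP replied with %u bytes\n", len);
}

static int example_probe(struct platform_device *pdev)
{
	struct mtk_scp *scp;
	u32 msg = 0x1234;	/* illustrative payload */
	int ret;

	scp = scp_get(pdev);	/* look up the SCP instance for this device */
	if (!scp)
		return -EPROBE_DEFER;

	ret = scp_ipi_register(scp, SCP_IPI_VDEC_H264,
			       example_ipi_handler, NULL);
	if (ret)
		goto err_put;

	/* wait up to 100 (assumed msecs) for the firmware to acknowledge */
	ret = scp_ipi_send(scp, SCP_IPI_VDEC_H264, &msg, sizeof(msg), 100);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	scp_ipi_unregister(scp, SCP_IPI_VDEC_H264);
err_put:
	scp_put(scp);
	return ret;
}
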
index 1a40277..df0124e 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 
-struct ring_buffer;
+struct trace_buffer;
 struct ring_buffer_iter;
 
 /*
@@ -77,13 +77,13 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
  *  else
  *    ring_buffer_unlock_commit(buffer, event);
  */
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
                                struct ring_buffer_event *event);
 
 /*
  * size is in bytes for each per CPU buffer.
  */
-struct ring_buffer *
+struct trace_buffer *
 __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
 
 /*
@@ -97,38 +97,38 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
        __ring_buffer_alloc((size), (flags), &__key);   \
 })
 
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table);
 
 
 #define RING_BUFFER_ALL_CPUS -1
 
-void ring_buffer_free(struct ring_buffer *buffer);
+void ring_buffer_free(struct trace_buffer *buffer);
 
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
 
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
 
-struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
                                                   unsigned long length);
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
                              struct ring_buffer_event *event);
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
                      unsigned long length, void *data);
 
-void ring_buffer_nest_start(struct ring_buffer *buffer);
-void ring_buffer_nest_end(struct ring_buffer *buffer);
+void ring_buffer_nest_start(struct trace_buffer *buffer);
+void ring_buffer_nest_end(struct trace_buffer *buffer);
 
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
                 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
                    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
 void ring_buffer_read_prepare_sync(void);
 void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
@@ -140,59 +140,59 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
 
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
 
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_reset(struct ring_buffer *buffer);
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset(struct trace_buffer *buffer);
 
 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
-                        struct ring_buffer *buffer_b, int cpu);
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+                        struct trace_buffer *buffer_b, int cpu);
 #else
 static inline int
-ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
-                    struct ring_buffer *buffer_b, int cpu)
+ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+                    struct trace_buffer *buffer_b, int cpu)
 {
        return -ENODEV;
 }
 #endif
 
-bool ring_buffer_empty(struct ring_buffer *buffer);
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
-
-void ring_buffer_record_disable(struct ring_buffer *buffer);
-void ring_buffer_record_enable(struct ring_buffer *buffer);
-void ring_buffer_record_off(struct ring_buffer *buffer);
-void ring_buffer_record_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_entries(struct ring_buffer *buffer);
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+bool ring_buffer_empty(struct trace_buffer *buffer);
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
+
+void ring_buffer_record_disable(struct trace_buffer *buffer);
+void ring_buffer_record_enable(struct trace_buffer *buffer);
+void ring_buffer_record_off(struct trace_buffer *buffer);
+void ring_buffer_record_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_entries(struct trace_buffer *buffer);
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
                                      int cpu, u64 *ts);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
                           u64 (*clock)(void));
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
 
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
 
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
+int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
                          size_t len, int cpu, int full);
 
 struct trace_seq;
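
For reference, a minimal sketch exercising this API with its renamed struct trace_buffer handle: reserve-and-commit one event, then consume it back on the same CPU. Buffer size, payload layout, and flags are illustrative.

#include <linux/module.h>
#include <linux/ring_buffer.h>
#include <linux/smp.h>

struct example_entry {
	u64 value;
};

static int __init example_rb_init(void)
{
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	struct example_entry *entry;
	int cpu;

	buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	cpu = get_cpu();	/* write and read back on the same CPU */

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (event) {
		entry = ring_buffer_event_data(event);
		entry->value = 42;
		ring_buffer_unlock_commit(buffer, event);
	}

	event = ring_buffer_consume(buffer, cpu, NULL, NULL);
	if (event) {
		entry = ring_buffer_event_data(event);
		pr_info("read back %llu\n",
			(unsigned long long)entry->value);
	}

	put_cpu();
	ring_buffer_free(buffer);
	return 0;
}
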
diff --git a/include/linux/rpmsg/mtk_rpmsg.h b/include/linux/rpmsg/mtk_rpmsg.h
new file mode 100644 (file)
index 0000000..363b601
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC.
+ */
+
+#ifndef __LINUX_RPMSG_MTK_RPMSG_H
+#define __LINUX_RPMSG_MTK_RPMSG_H
+
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+typedef void (*ipi_handler_t)(void *data, unsigned int len, void *priv);
+
+/*
+ * struct mtk_rpmsg_info - IPI functions tied to the rpmsg device.
+ * @register_ipi: register IPI handler for an IPI id.
+ * @unregister_ipi: unregister IPI handler for a registered IPI id.
+ * @send_ipi: send an IPI to an IPI id. wait is the timeout (in msecs) to
+ *            wait for a response, or 0 if there's no timeout.
+ * @ns_ipi_id: the IPI id used for name service, or -1 if name service isn't
+ *             supported.
+ */
+struct mtk_rpmsg_info {
+       int (*register_ipi)(struct platform_device *pdev, u32 id,
+                           ipi_handler_t handler, void *priv);
+       void (*unregister_ipi)(struct platform_device *pdev, u32 id);
+       int (*send_ipi)(struct platform_device *pdev, u32 id,
+                       void *buf, unsigned int len, unsigned int wait);
+       int ns_ipi_id;
+};
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+                             struct mtk_rpmsg_info *info);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev);
+
+#endif
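
A hedged sketch of how a remoteproc driver might describe its IPI primitives to this subdevice. The example_* callbacks are stubs standing in for the platform's real IPI layer, and omitting a name-service IPI here is an illustrative choice.

#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/rpmsg/mtk_rpmsg.h>

static int example_register_ipi(struct platform_device *pdev, u32 id,
				ipi_handler_t handler, void *priv)
{
	/* forward to the platform's own IPI registration here */
	return 0;
}

static void example_unregister_ipi(struct platform_device *pdev, u32 id)
{
	/* forward to the platform's own IPI unregistration here */
}

static int example_send_ipi(struct platform_device *pdev, u32 id,
			    void *buf, unsigned int len, unsigned int wait)
{
	/* forward to the platform's own IPI send here */
	return 0;
}

static struct mtk_rpmsg_info example_rpmsg_info = {
	.register_ipi	= example_register_ipi,
	.unregister_ipi	= example_unregister_ipi,
	.send_ipi	= example_send_ipi,
	.ns_ipi_id	= -1,	/* no name-service IPI in this sketch */
};

static int example_add_rpmsg_subdev(struct platform_device *pdev,
				    struct rproc *rproc)
{
	struct rproc_subdev *subdev;

	subdev = mtk_rpmsg_create_rproc_subdev(pdev, &example_rpmsg_info);
	if (!subdev)
		return -ENOMEM;

	rproc_add_subdev(rproc, subdev);
	return 0;
}
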
index 4e9d3c7..23990bd 100644 (file)
@@ -167,6 +167,7 @@ struct rtc_device {
 #define RTC_TIMESTAMP_BEGIN_1900       -2208988800LL /* 1900-01-01 00:00:00 */
 #define RTC_TIMESTAMP_BEGIN_2000       946684800LL /* 2000-01-01 00:00:00 */
 #define RTC_TIMESTAMP_END_2063         2966371199LL /* 2063-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2079         3471292799LL /* 2079-12-31 23:59:59 */
 #define RTC_TIMESTAMP_END_2099         4102444799LL /* 2099-12-31 23:59:59 */
 #define RTC_TIMESTAMP_END_2199         7258118399LL /* 2199-12-31 23:59:59 */
 #define RTC_TIMESTAMP_END_9999         253402300799LL /* 9999-12-31 23:59:59 */
index 5998e1f..770c2bf 100644 (file)
@@ -160,6 +160,19 @@ static const struct file_operations __name ## _fops = {                    \
        .release        = single_release,                               \
 }
 
+#define DEFINE_PROC_SHOW_ATTRIBUTE(__name)                             \
+static int __name ## _open(struct inode *inode, struct file *file)     \
+{                                                                      \
+       return single_open(file, __name ## _show, inode->i_private);    \
+}                                                                      \
+                                                                       \
+static const struct proc_ops __name ## _proc_ops = {                   \
+       .proc_open      = __name ## _open,                              \
+       .proc_read      = seq_read,                                     \
+       .proc_lseek     = seq_lseek,                                    \
+       .proc_release   = single_release,                               \
+}
+
 static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
 {
 #ifdef CONFIG_USER_NS
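
For reference, a minimal sketch of the new DEFINE_PROC_SHOW_ATTRIBUTE() helper: from a single show function it generates example_stats_open() and example_stats_proc_ops, mirroring DEFINE_SHOW_ATTRIBUTE() for the proc_ops world. Names are illustrative.

#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "count: %d\n", 42);
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(example_stats);

static int __init example_stats_init(void)
{
	/* the macro above generated example_stats_proc_ops */
	proc_create("example_stats", 0444, NULL, &example_stats_proc_ops);
	return 0;
}
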
index 877a95c..03a3893 100644 (file)
@@ -184,7 +184,6 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check __krealloc(const void *, size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 void kzfree(const void *);
index 02894e4..6dfbb2e 100644 (file)
@@ -62,6 +62,7 @@ extern char * strchr(const char *,int);
 #ifndef __HAVE_ARCH_STRCHRNUL
 extern char * strchrnul(const char *,int);
 #endif
+extern char * strnchrnul(const char *, size_t, int);
 #ifndef __HAVE_ARCH_STRNCHR
 extern char * strnchr(const char *, size_t, int);
 #endif
index 84b92b4..d94d4f4 100644 (file)
@@ -63,7 +63,7 @@ struct proc_dir_entry *       rpc_proc_register(struct net *,struct rpc_stat *);
 void                   rpc_proc_unregister(struct net *,const char *);
 void                   rpc_proc_zero(const struct rpc_program *);
 struct proc_dir_entry *        svc_proc_register(struct net *, struct svc_stat *,
-                                         const struct file_operations *);
+                                         const struct proc_ops *);
 void                   svc_proc_unregister(struct net *, const char *);
 
 void                   svc_seq_show(struct seq_file *,
@@ -75,7 +75,7 @@ static inline void rpc_proc_unregister(struct net *net, const char *p) {}
 static inline void rpc_proc_zero(const struct rpc_program *p) {}
 
 static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s,
-                                                      const struct file_operations *f) { return NULL; }
+                                                      const struct proc_ops *proc_ops) { return NULL; }
 static inline void svc_proc_unregister(struct net *net, const char *p) {}
 
 static inline void svc_seq_show(struct seq_file *seq,
index 13ea7f7..af2c85d 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/tracepoint.h>
 
 struct trace_array;
-struct trace_buffer;
+struct array_buffer;
 struct tracer;
 struct dentry;
 struct bpf_prog;
@@ -79,7 +79,7 @@ struct trace_entry {
 struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
-       struct trace_buffer     *trace_buffer;
+       struct array_buffer     *array_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
@@ -153,7 +153,7 @@ void tracing_generic_entry_update(struct trace_entry *entry,
 struct trace_event_file;
 
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
                                struct trace_event_file *trace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc);
@@ -226,12 +226,13 @@ extern int trace_event_reg(struct trace_event_call *event,
                            enum trace_reg type, void *data);
 
 struct trace_event_buffer {
-       struct ring_buffer              *buffer;
+       struct trace_buffer             *buffer;
        struct ring_buffer_event        *event;
        struct trace_event_file         *trace_file;
        void                            *entry;
        unsigned long                   flags;
        int                             pc;
+       struct pt_regs                  *regs;
 };
 
 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
@@ -364,6 +365,128 @@ enum {
        EVENT_FILE_FL_WAS_ENABLED_BIT,
 };
 
+extern struct trace_event_file *trace_get_event_file(const char *instance,
+                                                    const char *system,
+                                                    const char *event);
+extern void trace_put_event_file(struct trace_event_file *file);
+
+#define MAX_DYNEVENT_CMD_LEN   (2048)
+
+enum dynevent_type {
+       DYNEVENT_TYPE_SYNTH = 1,
+       DYNEVENT_TYPE_KPROBE,
+       DYNEVENT_TYPE_NONE,
+};
+
+struct dynevent_cmd;
+
+typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
+
+struct dynevent_cmd {
+       struct seq_buf          seq;
+       const char              *event_name;
+       unsigned int            n_fields;
+       enum dynevent_type      type;
+       dynevent_create_fn_t    run_command;
+       void                    *private_data;
+};
+
+extern int dynevent_create(struct dynevent_cmd *cmd);
+
+extern int synth_event_delete(const char *name);
+
+extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
+                                char *buf, int maxlen);
+
+extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
+                                      const char *name,
+                                      struct module *mod, ...);
+
+#define synth_event_gen_cmd_start(cmd, name, mod, ...) \
+       __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
+
+struct synth_field_desc {
+       const char *type;
+       const char *name;
+};
+
+extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
+                                          const char *name,
+                                          struct module *mod,
+                                          struct synth_field_desc *fields,
+                                          unsigned int n_fields);
+extern int synth_event_create(const char *name,
+                             struct synth_field_desc *fields,
+                             unsigned int n_fields, struct module *mod);
+
+extern int synth_event_add_field(struct dynevent_cmd *cmd,
+                                const char *type,
+                                const char *name);
+extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
+                                    const char *type_name);
+extern int synth_event_add_fields(struct dynevent_cmd *cmd,
+                                 struct synth_field_desc *fields,
+                                 unsigned int n_fields);
+
+#define synth_event_gen_cmd_end(cmd)   \
+       dynevent_create(cmd)
+
+struct synth_event;
+
+struct synth_event_trace_state {
+       struct trace_event_buffer fbuffer;
+       struct synth_trace_event *entry;
+       struct trace_buffer *buffer;
+       struct synth_event *event;
+       unsigned int cur_field;
+       unsigned int n_u64;
+       bool enabled;
+       bool add_next;
+       bool add_name;
+};
+
+extern int synth_event_trace(struct trace_event_file *file,
+                            unsigned int n_vals, ...);
+extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+                                  unsigned int n_vals);
+extern int synth_event_trace_start(struct trace_event_file *file,
+                                  struct synth_event_trace_state *trace_state);
+extern int synth_event_add_next_val(u64 val,
+                                   struct synth_event_trace_state *trace_state);
+extern int synth_event_add_val(const char *field_name, u64 val,
+                              struct synth_event_trace_state *trace_state);
+extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
+
+extern int kprobe_event_delete(const char *name);
+
+extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
+                                 char *buf, int maxlen);
+
+#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)                        \
+       __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
+
+#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)             \
+       __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
+
+extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
+                                       bool kretprobe,
+                                       const char *name,
+                                       const char *loc, ...);
+
+#define kprobe_event_add_fields(cmd, ...)      \
+       __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
+
+#define kprobe_event_add_field(cmd, field)     \
+       __kprobe_event_add_fields(cmd, field, NULL)
+
+extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+
+#define kprobe_event_gen_cmd_end(cmd)          \
+       dynevent_create(cmd)
+
+#define kretprobe_event_gen_cmd_end(cmd)       \
+       dynevent_create(cmd)
+
 /*
  * Event file flags:
  *  ENABLED      - The event is enabled
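
For reference, a hedged sketch of the command interface declared above: a module builds a two-field synthetic event, registers it, and fires one record. The event name, field layout, and values are illustrative, and the event still has to be enabled (for example via tracefs) before records show up in the trace.

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/trace_events.h>

static int __init example_synth_init(void)
{
	struct trace_event_file *file;
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* build "example_latency u64 lat; pid_t pid" */
	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
	ret = synth_event_gen_cmd_start(&cmd, "example_latency", THIS_MODULE,
					"u64", "lat",
					"pid_t", "pid");
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);
	if (ret)
		goto out;

	file = trace_get_event_file(NULL, "synthetic", "example_latency");
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_delete;
	}

	/* one record: lat=1000, pid=current->pid (values passed as u64) */
	ret = synth_event_trace(file, 2, (u64)1000, (u64)current->pid);
	trace_put_event_file(file);

out_delete:
	if (ret)
		synth_event_delete("example_latency");
out:
	kfree(buf);
	return ret;
}
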
index 88d279c..9991244 100644 (file)
@@ -28,7 +28,6 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
 struct dentry *tracefs_create_dir(const char *name, struct dentry *parent);
 
 void tracefs_remove(struct dentry *dentry);
-void tracefs_remove_recursive(struct dentry *dentry);
 
 struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
                                           int (*mkdir)(const char *name),
index 5a31525..f657ff0 100644 (file)
@@ -1450,7 +1450,7 @@ struct snd_pcm_status64 {
 #define SNDRV_PCM_IOCTL_STATUS_EXT64   _IOWR('A', 0x24, struct snd_pcm_status64)
 
 struct snd_pcm_status32 {
-       s32 state;              /* stream state */
+       snd_pcm_state_t state;          /* stream state */
        s32 trigger_tstamp_sec; /* time when stream was started/stopped/paused */
        s32 trigger_tstamp_nsec;
        s32 tstamp_sec;         /* reference timestamp */
@@ -1461,7 +1461,7 @@ struct snd_pcm_status32 {
        u32 avail;              /* number of frames available */
        u32 avail_max;          /* max frames available on hw since last status */
        u32 overrange;          /* count of ADC (capture) overrange detections from last status */
-       s32 suspended_state;    /* suspended stream state */
+       snd_pcm_state_t suspended_state;        /* suspended stream state */
        u32 audio_tstamp_data;  /* needed for 64-bit alignment, used for configs/report to/from userspace */
        s32 audio_tstamp_sec;   /* sample counter, wall clock, PHC or on-demand sync'ed */
        s32 audio_tstamp_nsec;
index 54e61d4..112bd06 100644 (file)
@@ -49,12 +49,6 @@ DEFINE_EVENT(dma_map, map_single,
        TP_ARGS(dev, dev_addr, phys_addr, size)
 );
 
-DEFINE_EVENT(dma_map, map_sg,
-       TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
-                size_t size),
-       TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
 DEFINE_EVENT(dma_map, bounce_map_single,
        TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
                 size_t size),
@@ -99,6 +93,48 @@ DEFINE_EVENT(dma_unmap, bounce_unmap_single,
        TP_ARGS(dev, dev_addr, size)
 );
 
+DECLARE_EVENT_CLASS(dma_map_sg,
+       TP_PROTO(struct device *dev, int index, int total,
+                struct scatterlist *sg),
+
+       TP_ARGS(dev, index, total, sg),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name(dev))
+               __field(dma_addr_t, dev_addr)
+               __field(phys_addr_t, phys_addr)
+               __field(size_t, size)
+               __field(int, index)
+               __field(int, total)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name(dev));
+               __entry->dev_addr = sg->dma_address;
+               __entry->phys_addr = sg_phys(sg);
+               __entry->size = sg->dma_length;
+               __entry->index = index;
+               __entry->total = total;
+       ),
+
+       TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu",
+                 __get_str(dev_name), __entry->index, __entry->total,
+                 (unsigned long long)__entry->dev_addr,
+                 (unsigned long long)__entry->phys_addr,
+                 __entry->size)
+);
+
+DEFINE_EVENT(dma_map_sg, map_sg,
+       TP_PROTO(struct device *dev, int index, int total,
+                struct scatterlist *sg),
+       TP_ARGS(dev, index, total, sg)
+);
+
+DEFINE_EVENT(dma_map_sg, bounce_map_sg,
+       TP_PROTO(struct device *dev, int index, int total,
+                struct scatterlist *sg),
+       TP_ARGS(dev, index, total, sg)
+);
 #endif /* _TRACE_INTEL_IOMMU_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
new file mode 100644 (file)
index 0000000..cf243de
--- /dev/null
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pwm
+
+#if !defined(_TRACE_PWM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PWM_H
+
+#include <linux/pwm.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(pwm,
+
+       TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+       TP_ARGS(pwm, state),
+
+       TP_STRUCT__entry(
+               __field(struct pwm_device *, pwm)
+               __field(u64, period)
+               __field(u64, duty_cycle)
+               __field(enum pwm_polarity, polarity)
+               __field(bool, enabled)
+       ),
+
+       TP_fast_assign(
+               __entry->pwm = pwm;
+               __entry->period = state->period;
+               __entry->duty_cycle = state->duty_cycle;
+               __entry->polarity = state->polarity;
+               __entry->enabled = state->enabled;
+       ),
+
+       TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d",
+                 __entry->pwm, __entry->period, __entry->duty_cycle,
+                 __entry->polarity, __entry->enabled)
+
+);
+
+DEFINE_EVENT(pwm, pwm_apply,
+
+       TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+       TP_ARGS(pwm, state)
+
+);
+
+DEFINE_EVENT(pwm, pwm_get,
+
+       TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+       TP_ARGS(pwm, state)
+
+);
+
+#endif /* _TRACE_PWM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 96d77e5..502c7be 100644 (file)
@@ -2,7 +2,8 @@
 /*
  * Stage 1 of the trace events.
  *
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
  *
  * struct trace_event_raw_<call> {
  *     struct trace_entry              ent;
@@ -223,7 +224,8 @@ TRACE_MAKE_SYSTEM_STR();
 /*
  * Stage 3 of the trace events.
  *
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
  *
  * enum print_line_t
  * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
@@ -533,7 +535,8 @@ static inline notrace int trace_event_get_offsets_##call(           \
 /*
  * Stage 4 of the trace events.
  *
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
  *
  * For those macros defined with TRACE_EVENT:
  *
@@ -548,7 +551,7 @@ static inline notrace int trace_event_get_offsets_##call(           \
  *     enum event_trigger_type __tt = ETT_NONE;
  *     struct ring_buffer_event *event;
  *     struct trace_event_raw_<call> *entry; <-- defined in stage 1
- *     struct ring_buffer *buffer;
+ *     struct trace_buffer *buffer;
  *     unsigned long irq_flags;
  *     int __data_size;
  *     int pc;
index 2ad1788..095af36 100644 (file)
@@ -92,7 +92,12 @@ struct rtc_pll_info {
 #define RTC_PLL_GET    _IOR('p', 0x11, struct rtc_pll_info)  /* Get PLL correction */
 #define RTC_PLL_SET    _IOW('p', 0x12, struct rtc_pll_info)  /* Set PLL correction */
 
-#define RTC_VL_READ    _IOR('p', 0x13, int)    /* Voltage low detector */
+#define RTC_VL_DATA_INVALID    BIT(0) /* Voltage too low, RTC data is invalid */
+#define RTC_VL_BACKUP_LOW      BIT(1) /* Backup voltage is low */
+#define RTC_VL_BACKUP_EMPTY    BIT(2) /* Backup empty or not present */
+#define RTC_VL_ACCURACY_LOW    BIT(3) /* Voltage is low, RTC accuracy is reduced */
+
+#define RTC_VL_READ    _IOR('p', 0x13, unsigned int)   /* Voltage low detection */
 #define RTC_VL_CLR     _IO('p', 0x14)          /* Clear voltage low information */
 
 /* interrupt flags */
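
For reference, a hedged userspace sketch of the reworked ioctl: RTC_VL_READ now returns a bitmask of the RTC_VL_* flags above rather than a bare int. The device path is illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned int vl = 0;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}
	if (ioctl(fd, RTC_VL_READ, &vl) < 0) {
		perror("RTC_VL_READ");
		close(fd);
		return 1;
	}

	if (vl & RTC_VL_DATA_INVALID)
		printf("RTC data is invalid (voltage dropped too low)\n");
	if (vl & RTC_VL_BACKUP_LOW)
		printf("backup voltage is low\n");
	if (vl & RTC_VL_ACCURACY_LOW)
		printf("RTC accuracy is reduced\n");

	close(fd);
	return 0;
}
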
index 30ebb2a..535a722 100644 (file)
@@ -564,13 +564,13 @@ typedef char __pad_after_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
 #endif
 
 struct __snd_pcm_mmap_status64 {
-       __s32 state;                    /* RO: state - SNDRV_PCM_STATE_XXXX */
+       snd_pcm_state_t state;          /* RO: state - SNDRV_PCM_STATE_XXXX */
        __u32 pad1;                     /* Needed for 64 bit alignment */
        __pad_before_uframe __pad1;
        snd_pcm_uframes_t hw_ptr;       /* RO: hw ptr (0...boundary-1) */
        __pad_after_uframe __pad2;
        struct __snd_timespec64 tstamp; /* Timestamp */
-       __s32 suspended_state;          /* RO: suspended stream state */
+       snd_pcm_state_t suspended_state;/* RO: suspended stream state */
        __u32 pad3;                     /* Needed for 64 bit alignment */
        struct __snd_timespec64 audio_tstamp; /* sample counter or wall clock */
 };
index 24228a1..89a8895 100644 (file)
@@ -76,6 +76,7 @@ struct xenbus_device {
        enum xenbus_state state;
        struct completion down;
        struct work_struct work;
+       spinlock_t reclaim_lock;
 };
 
 static inline struct xenbus_device *to_xenbus_device(struct device *dev)
@@ -105,6 +106,7 @@ struct xenbus_driver {
        struct device_driver driver;
        int (*read_otherend_details)(struct xenbus_device *dev);
        int (*is_ready)(struct xenbus_device *dev);
+       void (*reclaim_memory)(struct xenbus_device *dev);
 };
 
 static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
index 24b23d8..cfee56c 100644 (file)
@@ -1224,6 +1224,20 @@ source "usr/Kconfig"
 
 endif
 
+config BOOT_CONFIG
+       bool "Boot config support"
+       depends on BLK_DEV_INITRD
+       select LIBXBC
+       default y
+       help
+         Extra boot config allows a system admin to pass a config file as
+         a supplemental extension of the kernel cmdline when booting.
+         The boot config file must be attached at the end of the initramfs
+         with its size and checksum.
+         See <file:Documentation/admin-guide/bootconfig.rst> for details.
+
+         If unsure, say Y.
+
 choice
        prompt "Compiler optimization level"
        default CC_OPTIMIZE_FOR_PERFORMANCE
index d8c7e86..cc0ee48 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/initrd.h>
 #include <linux/memblock.h>
 #include <linux/acpi.h>
+#include <linux/bootconfig.h>
 #include <linux/console.h>
 #include <linux/nmi.h>
 #include <linux/percpu.h>
@@ -136,8 +137,10 @@ char __initdata boot_command_line[COMMAND_LINE_SIZE];
 char *saved_command_line;
 /* Command line for parameter parsing */
 static char *static_command_line;
-/* Command line for per-initcall parameter parsing */
-static char *initcall_command_line;
+/* Untouched extra command line */
+static char *extra_command_line;
+/* Extra init arguments */
+static char *extra_init_args;
 
 static char *execute_command;
 static char *ramdisk_execute_command;
@@ -245,6 +248,156 @@ static int __init loglevel(char *str)
 
 early_param("loglevel", loglevel);
 
+#ifdef CONFIG_BOOT_CONFIG
+
+char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
+
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+static int __init xbc_snprint_cmdline(char *buf, size_t size,
+                                     struct xbc_node *root)
+{
+       struct xbc_node *knode, *vnode;
+       char *end = buf + size;
+       char c = '\"';
+       const char *val;
+       int ret;
+
+       xbc_node_for_each_key_value(root, knode, val) {
+               ret = xbc_node_compose_key_after(root, knode,
+                                       xbc_namebuf, XBC_KEYLEN_MAX);
+               if (ret < 0)
+                       return ret;
+
+               vnode = xbc_node_get_child(knode);
+               ret = snprintf(buf, rest(buf, end), "%s%c", xbc_namebuf,
+                               vnode ? '=' : ' ');
+               if (ret < 0)
+                       return ret;
+               buf += ret;
+               if (!vnode)
+                       continue;
+
+               c = '\"';
+               xbc_array_for_each_value(vnode, val) {
+                       ret = snprintf(buf, rest(buf, end), "%c%s", c, val);
+                       if (ret < 0)
+                               return ret;
+                       buf += ret;
+                       c = ',';
+               }
+               if (rest(buf, end) > 2)
+                       strcpy(buf, "\" ");
+               buf += 2;
+       }
+
+       return buf - (end - size);
+}
+#undef rest
+
+/* Make an extra command line under given key word */
+static char * __init xbc_make_cmdline(const char *key)
+{
+       struct xbc_node *root;
+       char *new_cmdline;
+       int ret, len = 0;
+
+       root = xbc_find_node(key);
+       if (!root)
+               return NULL;
+
+       /* Count required buffer size */
+       len = xbc_snprint_cmdline(NULL, 0, root);
+       if (len <= 0)
+               return NULL;
+
+       new_cmdline = memblock_alloc(len + 1, SMP_CACHE_BYTES);
+       if (!new_cmdline) {
+               pr_err("Failed to allocate memory for extra kernel cmdline.\n");
+               return NULL;
+       }
+
+       ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
+       if (ret < 0 || ret > len) {
+               pr_err("Failed to print extra kernel cmdline.\n");
+               return NULL;
+       }
+
+       return new_cmdline;
+}
+
+u32 boot_config_checksum(unsigned char *p, u32 size)
+{
+       u32 ret = 0;
+
+       while (size--)
+               ret += *p++;
+
+       return ret;
+}
+
+static void __init setup_boot_config(const char *cmdline)
+{
+       u32 size, csum;
+       char *data, *copy;
+       const char *p;
+       u32 *hdr;
+       int ret;
+
+       p = strstr(cmdline, "bootconfig");
+       if (!p || (p != cmdline && !isspace(*(p-1))) ||
+           (p[10] && !isspace(p[10])))
+               return;
+
+       if (!initrd_end)
+               goto not_found;
+
+       hdr = (u32 *)(initrd_end - 8);
+       size = hdr[0];
+       csum = hdr[1];
+
+       if (size >= XBC_DATA_MAX) {
+               pr_err("bootconfig size %d greater than max size %d\n",
+                       size, XBC_DATA_MAX);
+               return;
+       }
+
+       data = ((void *)hdr) - size;
+       if ((unsigned long)data < initrd_start)
+               goto not_found;
+
+       if (boot_config_checksum((unsigned char *)data, size) != csum) {
+               pr_err("bootconfig checksum failed\n");
+               return;
+       }
+
+       copy = memblock_alloc(size + 1, SMP_CACHE_BYTES);
+       if (!copy) {
+               pr_err("Failed to allocate memory for bootconfig\n");
+               return;
+       }
+
+       memcpy(copy, data, size);
+       copy[size] = '\0';
+
+       ret = xbc_init(copy);
+       if (ret < 0)
+               pr_err("Failed to parse bootconfig\n");
+       else {
+               pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
+               /* keys starting with "kernel." are passed via cmdline */
+               extra_command_line = xbc_make_cmdline("kernel");
+               /* Also, "init." keys are init arguments */
+               extra_init_args = xbc_make_cmdline("init");
+       }
+       return;
+not_found:
+       pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+}
+#else
+#define setup_boot_config(cmdline)     do { } while (0)
+#endif
+
 /* Change NUL term back to "=", to make "param" the whole string. */
 static void __init repair_env_string(char *param, char *val)
 {
@@ -373,22 +526,50 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
  */
 static void __init setup_command_line(char *command_line)
 {
-       size_t len = strlen(boot_command_line) + 1;
+       size_t len, xlen = 0, ilen = 0;
 
-       saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
-       if (!saved_command_line)
-               panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+       if (extra_command_line)
+               xlen = strlen(extra_command_line);
+       if (extra_init_args)
+               ilen = strlen(extra_init_args) + 4; /* for " -- " */
 
-       initcall_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
-       if (!initcall_command_line)
-               panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+       len = xlen + strlen(boot_command_line) + 1;
+
+       saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
+       if (!saved_command_line)
+               panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
 
        static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
        if (!static_command_line)
                panic("%s: Failed to allocate %zu bytes\n", __func__, len);
 
-       strcpy(saved_command_line, boot_command_line);
-       strcpy(static_command_line, command_line);
+       if (xlen) {
+               /*
+                * We have to put extra_command_line before the boot command
+                * line because the latter may contain dashes (the separator
+                * of the init command line).
+                */
+               strcpy(saved_command_line, extra_command_line);
+               strcpy(static_command_line, extra_command_line);
+       }
+       strcpy(saved_command_line + xlen, boot_command_line);
+       strcpy(static_command_line + xlen, command_line);
+
+       if (ilen) {
+               /*
+                * Append supplemental init boot args to saved_command_line
+                * so that the user can check which command line options were
+                * passed to init.
+                */
+               len = strlen(saved_command_line);
+               if (!strstr(boot_command_line, " -- ")) {
+                       strcpy(saved_command_line + len, " -- ");
+                       len += 4;
+               } else
+                       saved_command_line[len++] = ' ';
+
+               strcpy(saved_command_line + len, extra_init_args);
+       }
 }
 
 /*
@@ -595,6 +776,7 @@ asmlinkage __visible void __init start_kernel(void)
        pr_notice("%s", linux_banner);
        early_security_init();
        setup_arch(&command_line);
+       setup_boot_config(command_line);
        setup_command_line(command_line);
        setup_nr_cpu_ids();
        setup_per_cpu_areas();
@@ -604,7 +786,7 @@ asmlinkage __visible void __init start_kernel(void)
        build_all_zonelists(NULL);
        page_alloc_init();
 
-       pr_notice("Kernel command line: %s\n", boot_command_line);
+       pr_notice("Kernel command line: %s\n", saved_command_line);
        /* parameters may set static keys */
        jump_label_init();
        parse_early_param();
@@ -615,6 +797,9 @@ asmlinkage __visible void __init start_kernel(void)
        if (!IS_ERR_OR_NULL(after_dashes))
                parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
                           NULL, set_init_arg);
+       if (extra_init_args)
+               parse_args("Setting extra init args", extra_init_args,
+                          NULL, 0, -1, -1, NULL, set_init_arg);
 
        /*
         * These use large bootmem allocations and must precede
@@ -996,13 +1181,12 @@ static int __init ignore_unknown_bootoption(char *param, char *val,
        return 0;
 }
 
-static void __init do_initcall_level(int level)
+static void __init do_initcall_level(int level, char *command_line)
 {
        initcall_entry_t *fn;
 
-       strcpy(initcall_command_line, saved_command_line);
        parse_args(initcall_level_names[level],
-                  initcall_command_line, __start___param,
+                  command_line, __start___param,
                   __stop___param - __start___param,
                   level, level,
                   NULL, ignore_unknown_bootoption);
@@ -1015,9 +1199,20 @@ static void __init do_initcall_level(int level)
 static void __init do_initcalls(void)
 {
        int level;
+       size_t len = strlen(saved_command_line) + 1;
+       char *command_line;
+
+       command_line = kzalloc(len, GFP_KERNEL);
+       if (!command_line)
+               panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+       for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
+               /* Parser modifies command_line, restore it each time */
+               strcpy(command_line, saved_command_line);
+               do_initcall_level(level, command_line);
+       }
 
-       for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
-               do_initcall_level(level);
+       kfree(command_line);
 }
 
 /*
index 3d920ff..49a05ba 100644 (file)
@@ -63,6 +63,66 @@ struct posix_msg_tree_node {
        int                     priority;
 };
 
+/*
+ * Locking:
+ *
+ * Accesses to a message queue are synchronized by acquiring info->lock.
+ *
+ * There are two notable exceptions:
+ * - The actual wakeup of a sleeping task is performed using the wake_q
+ *   framework. info->lock is already released when wake_up_q is called.
+ * - The exit codepaths after sleeping check ext_wait_queue->state without
+ *   any locks. If it is STATE_READY, then the syscall is completed without
+ *   acquiring info->lock.
+ *
+ * MQ_BARRIER:
+ * To achieve proper release/acquire memory barrier pairing, the state is set to
+ * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
+ * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
+ *
+ * This prevents the following races:
+ *
+ * 1) With the simple wake_q_add(), the task could already be gone before
+ *    the reference count is increased:
+ * Thread A
+ *                             Thread B
+ * WRITE_ONCE(wait.state, STATE_NONE);
+ * schedule_hrtimeout()
+ *                             wake_q_add(A)
+ *                             if (cmpxchg()) // success
+ *                                ->state = STATE_READY (reordered)
+ * <timeout returns>
+ * if (wait.state == STATE_READY) return;
+ * sysret to user space
+ * sys_exit()
+ *                             get_task_struct() // UaF
+ *
+ * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
+ * the smp_store_release() that does ->state = STATE_READY.
+ *
+ * 2) Without proper _release/_acquire barriers, the woken up task
+ *    could read stale data
+ *
+ * Thread A
+ *                             Thread B
+ * do_mq_timedreceive
+ * WRITE_ONCE(wait.state, STATE_NONE);
+ * schedule_hrtimeout()
+ *                             state = STATE_READY;
+ * <timeout returns>
+ * if (wait.state == STATE_READY) return;
+ * msg_ptr = wait.msg;         // Access to stale data!
+ *                             receiver->msg = message; (reordered)
+ *
+ * Solution: use _release and _acquire barriers.
+ *
+ * 3) There is intentionally no barrier when setting current->state
+ *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
+ *    release memory barrier, and the wakeup is triggered when holding
+ *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
+ *    acquire memory barrier.
+ */
+
 struct ext_wait_queue {                /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
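
Distilled from the comment above, a hedged sketch of the pairing it describes: the waker takes a task reference, publishes the ready state with a release store, and hands the reference to wake_q_add_safe(); the lockless exit path pairs the read with a control-dependency acquire. The names and state values are illustrative, not the mqueue code itself.

#include <asm/barrier.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/wake_q.h>

enum example_state { EXAMPLE_STATE_NONE, EXAMPLE_STATE_READY };

struct example_waiter {
	struct task_struct *task;
	enum example_state state;
};

static void example_wake(struct wake_q_head *wake_q, struct example_waiter *w)
{
	get_task_struct(w->task);		/* reference taken first ... */
	smp_store_release(&w->state, EXAMPLE_STATE_READY);
	wake_q_add_safe(wake_q, w->task);	/* ... and consumed here */
}

static bool example_woken(struct example_waiter *w)
{
	if (READ_ONCE(w->state) != EXAMPLE_STATE_READY)
		return false;

	/* pairs with the smp_store_release() in example_wake() */
	smp_acquire__after_ctrl_dep();
	return true;
}
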
@@ -646,18 +706,23 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr,
        wq_add(info, sr, ewp);
 
        for (;;) {
+               /* memory barrier not required, we hold info->lock */
                __set_current_state(TASK_INTERRUPTIBLE);
 
                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);
 
-               if (ewp->state == STATE_READY) {
+               if (READ_ONCE(ewp->state) == STATE_READY) {
+                       /* see MQ_BARRIER for purpose/pairing */
+                       smp_acquire__after_ctrl_dep();
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);
-               if (ewp->state == STATE_READY) {
+
+               /* we hold info->lock, so no memory barrier required */
+               if (READ_ONCE(ewp->state) == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
@@ -918,6 +983,18 @@ out_name:
  * The same algorithm is used for senders.
  */
 
+static inline void __pipelined_op(struct wake_q_head *wake_q,
+                                 struct mqueue_inode_info *info,
+                                 struct ext_wait_queue *this)
+{
+       list_del(&this->list);
+       get_task_struct(this->task);
+
+       /* see MQ_BARRIER for purpose/pairing */
+       smp_store_release(&this->state, STATE_READY);
+       wake_q_add_safe(wake_q, this->task);
+}
+
 /* pipelined_send() - send a message directly to the task waiting in
  * sys_mq_timedreceive() (without inserting message into a queue).
  */
@@ -927,17 +1004,7 @@ static inline void pipelined_send(struct wake_q_head *wake_q,
                                  struct ext_wait_queue *receiver)
 {
        receiver->msg = message;
-       list_del(&receiver->list);
-       wake_q_add(wake_q, receiver->task);
-       /*
-        * Rely on the implicit cmpxchg barrier from wake_q_add such
-        * that we can ensure that updating receiver->state is the last
-        * write operation: As once set, the receiver can continue,
-        * and if we don't have the reference count from the wake_q,
-        * yet, at that point we can later have a use-after-free
-        * condition and bogus wakeup.
-        */
-       receiver->state = STATE_READY;
+       __pipelined_op(wake_q, info, receiver);
 }
 
 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
@@ -955,9 +1022,7 @@ static inline void pipelined_receive(struct wake_q_head *wake_q,
        if (msg_insert(sender->msg, info))
                return;
 
-       list_del(&sender->list);
-       wake_q_add(wake_q, sender->task);
-       sender->state = STATE_READY;
+       __pipelined_op(wake_q, info, sender);
 }
 
 static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
@@ -1044,7 +1109,9 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
-                       wait.state = STATE_NONE;
+
+                       /* memory barrier not required, we hold info->lock */
+                       WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
@@ -1147,7 +1214,9 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
-                       wait.state = STATE_NONE;
+
+                       /* memory barrier not required, we hold info->lock */
+                       WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
index 8dec945..caca673 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -61,6 +61,16 @@ struct msg_queue {
        struct list_head q_senders;
 } __randomize_layout;
 
+/*
+ * MSG_BARRIER Locking:
+ *
+ * Similar to the optimization used in ipc/mqueue.c, one syscall return path
+ * does not acquire any locks when it sees that a message exists in
+ * msg_receiver.r_msg. Therefore r_msg is set using smp_store_release()
+ * and accessed using READ_ONCE()+smp_acquire__after_ctrl_dep(). In addition,
+ * wake_q_add_safe() is used. See ipc/mqueue.c for more details.
+ */
+
 /* one msg_receiver structure for each sleeping receiver */
 struct msg_receiver {
        struct list_head        r_list;
@@ -184,6 +194,10 @@ static inline void ss_add(struct msg_queue *msq,
 {
        mss->tsk = current;
        mss->msgsz = msgsz;
+       /*
+        * No memory barrier required: we did ipc_lock_object(),
+        * and the waker obtains that lock before calling wake_q_add().
+        */
        __set_current_state(TASK_INTERRUPTIBLE);
        list_add_tail(&mss->list, &msq->q_senders);
 }
@@ -237,8 +251,11 @@ static void expunge_all(struct msg_queue *msq, int res,
        struct msg_receiver *msr, *t;
 
        list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-               wake_q_add(wake_q, msr->r_tsk);
-               WRITE_ONCE(msr->r_msg, ERR_PTR(res));
+               get_task_struct(msr->r_tsk);
+
+               /* see MSG_BARRIER for purpose/pairing */
+               smp_store_release(&msr->r_msg, ERR_PTR(res));
+               wake_q_add_safe(wake_q, msr->r_tsk);
        }
 }
 
@@ -377,7 +394,7 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
  * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
-                       struct msqid64_ds *msqid64)
+                       struct ipc64_perm *perm, int msg_qbytes)
 {
        struct kern_ipc_perm *ipcp;
        struct msg_queue *msq;
@@ -387,7 +404,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
        rcu_read_lock();
 
        ipcp = ipcctl_obtain_check(ns, &msg_ids(ns), msqid, cmd,
-                                     &msqid64->msg_perm, msqid64->msg_qbytes);
+                                     perm, msg_qbytes);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
@@ -409,18 +426,18 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
        {
                DEFINE_WAKE_Q(wake_q);
 
-               if (msqid64->msg_qbytes > ns->msg_ctlmnb &&
+               if (msg_qbytes > ns->msg_ctlmnb &&
                    !capable(CAP_SYS_RESOURCE)) {
                        err = -EPERM;
                        goto out_unlock1;
                }
 
                ipc_lock_object(&msq->q_perm);
-               err = ipc_update_perm(&msqid64->msg_perm, ipcp);
+               err = ipc_update_perm(perm, ipcp);
                if (err)
                        goto out_unlock0;
 
-               msq->q_qbytes = msqid64->msg_qbytes;
+               msq->q_qbytes = msg_qbytes;
 
                msq->q_ctime = ktime_get_real_seconds();
                /*
@@ -601,9 +618,10 @@ static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int ver
        case IPC_SET:
                if (copy_msqid_from_user(&msqid64, buf, version))
                        return -EFAULT;
-               /* fallthru */
+               return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm,
+                                  msqid64.msg_qbytes);
        case IPC_RMID:
-               return msgctl_down(ns, msqid, cmd, &msqid64);
+               return msgctl_down(ns, msqid, cmd, NULL, 0);
        default:
                return  -EINVAL;
        }
@@ -735,9 +753,9 @@ static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int versio
        case IPC_SET:
                if (copy_compat_msqid_from_user(&msqid64, uptr, version))
                        return -EFAULT;
-               /* fallthru */
+               return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes);
        case IPC_RMID:
-               return msgctl_down(ns, msqid, cmd, &msqid64);
+               return msgctl_down(ns, msqid, cmd, NULL, 0);
        default:
                return -EINVAL;
        }
@@ -798,13 +816,17 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
                        list_del(&msr->r_list);
                        if (msr->r_maxsize < msg->m_ts) {
                                wake_q_add(wake_q, msr->r_tsk);
-                               WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG));
+
+                               /* See expunge_all regarding memory barrier */
+                               smp_store_release(&msr->r_msg, ERR_PTR(-E2BIG));
                        } else {
                                ipc_update_pid(&msq->q_lrpid, task_pid(msr->r_tsk));
                                msq->q_rtime = ktime_get_real_seconds();
 
                                wake_q_add(wake_q, msr->r_tsk);
-                               WRITE_ONCE(msr->r_msg, msg);
+
+                               /* See expunge_all regarding memory barrier */
+                               smp_store_release(&msr->r_msg, msg);
                                return 1;
                        }
                }
@@ -1154,7 +1176,11 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in
                        msr_d.r_maxsize = INT_MAX;
                else
                        msr_d.r_maxsize = bufsz;
-               msr_d.r_msg = ERR_PTR(-EAGAIN);
+
+               /* memory barrier not required due to ipc_lock_object() */
+               WRITE_ONCE(msr_d.r_msg, ERR_PTR(-EAGAIN));
+
+               /* memory barrier not required, we own ipc_lock_object() */
                __set_current_state(TASK_INTERRUPTIBLE);
 
                ipc_unlock_object(&msq->q_perm);
@@ -1183,8 +1209,12 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in
                 * signal) it will either see the message and continue ...
                 */
                msg = READ_ONCE(msr_d.r_msg);
-               if (msg != ERR_PTR(-EAGAIN))
+               if (msg != ERR_PTR(-EAGAIN)) {
+                       /* see MSG_BARRIER for purpose/pairing */
+                       smp_acquire__after_ctrl_dep();
+
                        goto out_unlock1;
+               }
 
                 /*
                  * ... or see -EAGAIN, acquire the lock to check the message
@@ -1192,7 +1222,7 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in
                  */
                ipc_lock_object(&msq->q_perm);
 
-               msg = msr_d.r_msg;
+               msg = READ_ONCE(msr_d.r_msg);
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock0;
 
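
The ipc/msg.c hunks above replace the old wake_q_add() + WRITE_ONCE() publication of r_msg with get_task_struct() + smp_store_release() + wake_q_add_safe() on the waker side, paired on the sleeper side with READ_ONCE() plus smp_acquire__after_ctrl_dep(). The sketch below is a minimal userspace analogue of that release/acquire handshake, written with C11 atomics instead of the kernel primitives; the names (struct reply, slot, sender, receiver) are invented for illustration and do not appear in the patch.

/* Build with: cc -std=c11 -pthread handshake.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct reply { int payload; };

static struct reply r;
static _Atomic(struct reply *) slot;    /* plays the role of msr->r_msg */

static void *sender(void *arg)
{
        (void)arg;
        r.payload = 42;                         /* plain writes to the message ... */
        atomic_store_explicit(&slot, &r,        /* ... published by a RELEASE store */
                              memory_order_release);
        return NULL;
}

static void *receiver(void *arg)
{
        struct reply *msg;

        (void)arg;
        /* ACQUIRE load pairs with the release store in sender() */
        while (!(msg = atomic_load_explicit(&slot, memory_order_acquire)))
                ;
        printf("payload=%d\n", msg->payload);   /* guaranteed to print 42 */
        return NULL;
}

int main(void)
{
        pthread_t s, c;

        pthread_create(&c, NULL, receiver, NULL);
        pthread_create(&s, NULL, sender, NULL);
        pthread_join(s, NULL);
        pthread_join(c, NULL);
        return 0;
}

As with r_msg, once the receiver observes the published pointer it is guaranteed to see the fully initialised message contents, without taking any lock.
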
index ec97a70..4f4303f 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -205,15 +205,38 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  *
  * Memory ordering:
  * Most ordering is enforced by using spin_lock() and spin_unlock().
- * The special case is use_global_lock:
+ *
+ * Exceptions:
+ * 1) use_global_lock: (SEM_BARRIER_1)
  * Setting it from non-zero to 0 is a RELEASE, this is ensured by
- * using smp_store_release().
+ * using smp_store_release(): Immediately after setting it to 0,
+ * a simple op can start.
  * Testing if it is non-zero is an ACQUIRE, this is ensured by using
  * smp_load_acquire().
  * Setting it from 0 to non-zero must be ordered with regards to
  * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
  * is inside a spin_lock() and after a write from 0 to non-zero a
  * spin_lock()+spin_unlock() is done.
+ *
+ * 2) queue.status: (SEM_BARRIER_2)
+ * Initialization is done while holding sem_lock(), so no further barrier is
+ * required.
+ * Setting it to a result code is a RELEASE, this is ensured both by
+ * smp_store_release() (for case a) and by holding sem_lock()
+ * (for case b).
+ * The ACQUIRE when reading the result code without holding sem_lock() is
+ * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep()
+ * (case a above).
+ * Reading the result code while holding sem_lock() needs no further barriers;
+ * the locks inside sem_lock() enforce ordering (case b above).
+ *
+ * 3) current->state:
+ * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
+ * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
+ * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
+ * when holding sem_lock(), no further barriers are required.
+ *
+ * See also ipc/mqueue.c for more details on the covered races.
  */
 
 #define sc_semmsl      sem_ctls[0]
@@ -344,12 +367,8 @@ static void complexmode_tryleave(struct sem_array *sma)
                return;
        }
        if (sma->use_global_lock == 1) {
-               /*
-                * Immediately after setting use_global_lock to 0,
-                * a simple op can start. Thus: all memory writes
-                * performed by the current operation must be visible
-                * before we set use_global_lock to 0.
-                */
+
+               /* See SEM_BARRIER_1 for purpose/pairing */
                smp_store_release(&sma->use_global_lock, 0);
        } else {
                sma->use_global_lock--;
@@ -400,7 +419,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                 */
                spin_lock(&sem->lock);
 
-               /* pairs with smp_store_release() */
+               /* see SEM_BARRIER_1 for purpose/pairing */
                if (!smp_load_acquire(&sma->use_global_lock)) {
                        /* fast path successful! */
                        return sops->sem_num;
@@ -766,15 +785,12 @@ would_block:
 static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
                                             struct wake_q_head *wake_q)
 {
-       wake_q_add(wake_q, q->sleeper);
-       /*
-        * Rely on the above implicit barrier, such that we can
-        * ensure that we hold reference to the task before setting
-        * q->status. Otherwise we could race with do_exit if the
-        * task is awoken by an external event before calling
-        * wake_up_process().
-        */
-       WRITE_ONCE(q->status, error);
+       get_task_struct(q->sleeper);
+
+       /* see SEM_BARRIER_2 for purpose/pairing */
+       smp_store_release(&q->status, error);
+
+       wake_q_add_safe(wake_q, q->sleeper);
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -2148,9 +2164,11 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
        }
 
        do {
+               /* memory ordering ensured by the lock in sem_lock() */
                WRITE_ONCE(queue.status, -EINTR);
                queue.sleeper = current;
 
+               /* memory ordering is ensured by the lock in sem_lock() */
                __set_current_state(TASK_INTERRUPTIBLE);
                sem_unlock(sma, locknum);
                rcu_read_unlock();
@@ -2173,13 +2191,8 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
                 */
                error = READ_ONCE(queue.status);
                if (error != -EINTR) {
-                       /*
-                        * User space could assume that semop() is a memory
-                        * barrier: Without the mb(), the cpu could
-                        * speculatively read in userspace stale data that was
-                        * overwritten by the previous owner of the semaphore.
-                        */
-                       smp_mb();
+                       /* see SEM_BARRIER_2 for purpose/pairing */
+                       smp_acquire__after_ctrl_dep();
                        goto out_free;
                }
 
@@ -2189,6 +2202,9 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
                if (!ipc_valid_object(&sma->sem_perm))
                        goto out_unlock_free;
 
+               /*
+                * No barrier required: we are protected by sem_lock()
+                */
                error = READ_ONCE(queue.status);
 
                /*
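
The new SEM_BARRIER_1 comment above documents the use_global_lock handoff: complexmode_tryleave() publishes the switch to per-semaphore locking with smp_store_release(), and sem_lock() checks it with smp_load_acquire() before trusting the fast path. Below is a rough userspace analogue of that pattern using C11 atomics; complex_mode, leave_complex_mode() and try_fast_path() are made-up names used only to illustrate the release/acquire pairing, not code from the patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int complex_mode = 1;     /* plays the role of sma->use_global_lock */

static void leave_complex_mode(void)
{
        /*
         * RELEASE: all writes performed while complex mode was held become
         * visible to any thread that observes complex_mode == 0 below.
         */
        atomic_store_explicit(&complex_mode, 0, memory_order_release);
}

static bool try_fast_path(void)
{
        /* ACQUIRE: pairs with the release store in leave_complex_mode() */
        if (!atomic_load_explicit(&complex_mode, memory_order_acquire)) {
                puts("fast path: the per-object lock is sufficient");
                return true;
        }
        puts("slow path: fall back to the global lock");
        return false;
}

int main(void)
{
        try_fast_path();        /* slow path: complex_mode is still set */
        leave_complex_mode();
        try_fast_path();        /* fast path after the release store */
        return 0;
}

Single-threaded as written, the program only demonstrates the control flow; the ordering guarantee matters once leave_complex_mode() and try_fast_path() run on different CPUs, which is exactly the situation sem_lock() has to handle.
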
index 915eacb..fe61df5 100644 (file)
@@ -126,7 +126,7 @@ void ipc_init_ids(struct ipc_ids *ids)
 }
 
 #ifdef CONFIG_PROC_FS
-static const struct file_operations sysvipc_proc_fops;
+static const struct proc_ops sysvipc_proc_ops;
 /**
  * ipc_init_proc_interface -  create a proc interface for sysipc types using a seq_file interface.
  * @path: Path in procfs
@@ -151,7 +151,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
        pde = proc_create_data(path,
                               S_IRUGO,        /* world readable */
                               NULL,           /* parent dir */
-                              &sysvipc_proc_fops,
+                              &sysvipc_proc_ops,
                               iface);
        if (!pde)
                kfree(iface);
@@ -884,10 +884,10 @@ static int sysvipc_proc_release(struct inode *inode, struct file *file)
        return seq_release_private(inode, file);
 }
 
-static const struct file_operations sysvipc_proc_fops = {
-       .open    = sysvipc_proc_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = sysvipc_proc_release,
+static const struct proc_ops sysvipc_proc_ops = {
+       .proc_open      = sysvipc_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = sysvipc_proc_release,
 };
 #endif /* CONFIG_PROC_FS */
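
The ipc/util.c hunks above are the first of several identical conversions in this section (kernel/configs.c, kernel/irq/proc.c, kernel/kallsyms.c, kernel/latencytop.c, lockdep, module.c, profile.c and sched/psi.c follow the same shape): procfs entries stop registering a struct file_operations and instead register a struct proc_ops whose members carry a proc_ prefix. The module below is a hypothetical, self-contained sketch of the converted pattern; the entry name and function names are invented and are not part of the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from proc_ops\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

/* was: struct file_operations with .open/.read/.llseek/.release */
static const struct proc_ops example_proc_ops = {
        .proc_open      = example_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
};

static int __init example_init(void)
{
        /* world-readable /proc/proc_ops_example */
        if (!proc_create("proc_ops_example", 0444, NULL, &example_proc_ops))
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("proc_ops_example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Writable entries follow the same pattern with .proc_write, and pollable ones (as in sched/psi.c) add .proc_poll.
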
index c09ea4c..a28c79c 100644 (file)
@@ -47,10 +47,9 @@ ikconfig_read_current(struct file *file, char __user *buf,
                                       &kernel_config_data);
 }
 
-static const struct file_operations ikconfig_file_ops = {
-       .owner = THIS_MODULE,
-       .read = ikconfig_read_current,
-       .llseek = default_llseek,
+static const struct proc_ops config_gz_proc_ops = {
+       .proc_read      = ikconfig_read_current,
+       .proc_lseek     = default_llseek,
 };
 
 static int __init ikconfig_init(void)
@@ -59,7 +58,7 @@ static int __init ikconfig_init(void)
 
        /* create the current config file */
        entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
-                           &ikconfig_file_ops);
+                           &config_gz_proc_ops);
        if (!entry)
                return -ENOMEM;
 
index dc9c643..17f9a4a 100644 (file)
@@ -4373,7 +4373,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_attach(struct perf_event *event,
-                              struct ring_buffer *rb);
+                              struct perf_buffer *rb);
 
 static void detach_sb_event(struct perf_event *event)
 {
@@ -5054,7 +5054,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 static __poll_t perf_poll(struct file *file, poll_table *wait)
 {
        struct perf_event *event = file->private_data;
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        __poll_t events = EPOLLHUP;
 
        poll_wait(file, &event->waitq, wait);
@@ -5296,7 +5296,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
                return perf_event_set_bpf_prog(event, arg);
 
        case PERF_EVENT_IOC_PAUSE_OUTPUT: {
-               struct ring_buffer *rb;
+               struct perf_buffer *rb;
 
                rcu_read_lock();
                rb = rcu_dereference(event->rb);
@@ -5432,7 +5432,7 @@ static void calc_timer_values(struct perf_event *event,
 static void perf_event_init_userpage(struct perf_event *event)
 {
        struct perf_event_mmap_page *userpg;
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
 
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
@@ -5464,7 +5464,7 @@ void __weak arch_perf_update_userpage(
 void perf_event_update_userpage(struct perf_event *event)
 {
        struct perf_event_mmap_page *userpg;
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        u64 enabled, running, now;
 
        rcu_read_lock();
@@ -5515,7 +5515,7 @@ EXPORT_SYMBOL_GPL(perf_event_update_userpage);
 static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
 {
        struct perf_event *event = vmf->vma->vm_file->private_data;
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        vm_fault_t ret = VM_FAULT_SIGBUS;
 
        if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -5548,9 +5548,9 @@ unlock:
 }
 
 static void ring_buffer_attach(struct perf_event *event,
-                              struct ring_buffer *rb)
+                              struct perf_buffer *rb)
 {
-       struct ring_buffer *old_rb = NULL;
+       struct perf_buffer *old_rb = NULL;
        unsigned long flags;
 
        if (event->rb) {
@@ -5608,7 +5608,7 @@ static void ring_buffer_attach(struct perf_event *event,
 
 static void ring_buffer_wakeup(struct perf_event *event)
 {
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
 
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
@@ -5619,9 +5619,9 @@ static void ring_buffer_wakeup(struct perf_event *event)
        rcu_read_unlock();
 }
 
-struct ring_buffer *ring_buffer_get(struct perf_event *event)
+struct perf_buffer *ring_buffer_get(struct perf_event *event)
 {
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
 
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
@@ -5634,7 +5634,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
        return rb;
 }
 
-void ring_buffer_put(struct ring_buffer *rb)
+void ring_buffer_put(struct perf_buffer *rb)
 {
        if (!refcount_dec_and_test(&rb->refcount))
                return;
@@ -5672,7 +5672,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       struct ring_buffer *rb = ring_buffer_get(event);
+       struct perf_buffer *rb = ring_buffer_get(event);
        struct user_struct *mmap_user = rb->mmap_user;
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
@@ -5790,8 +5790,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        struct perf_event *event = file->private_data;
        unsigned long user_locked, user_lock_limit;
        struct user_struct *user = current_user();
+       struct perf_buffer *rb = NULL;
        unsigned long locked, lock_limit;
-       struct ring_buffer *rb = NULL;
        unsigned long vma_size;
        unsigned long nr_pages;
        long user_extra = 0, extra = 0;
@@ -6266,7 +6266,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
                                          size_t size)
 {
        struct perf_event *sampler = event->aux_event;
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
 
        data->aux_size = 0;
 
@@ -6299,7 +6299,7 @@ out:
        return data->aux_size;
 }
 
-long perf_pmu_snapshot_aux(struct ring_buffer *rb,
+long perf_pmu_snapshot_aux(struct perf_buffer *rb,
                           struct perf_event *event,
                           struct perf_output_handle *handle,
                           unsigned long size)
@@ -6338,8 +6338,8 @@ static void perf_aux_sample_output(struct perf_event *event,
                                   struct perf_sample_data *data)
 {
        struct perf_event *sampler = event->aux_event;
+       struct perf_buffer *rb;
        unsigned long pad;
-       struct ring_buffer *rb;
        long size;
 
        if (WARN_ON_ONCE(!sampler || !data->aux_size))
@@ -6707,7 +6707,7 @@ void perf_output_sample(struct perf_output_handle *handle,
                int wakeup_events = event->attr.wakeup_events;
 
                if (wakeup_events) {
-                       struct ring_buffer *rb = handle->rb;
+                       struct perf_buffer *rb = handle->rb;
                        int events = local_inc_return(&rb->events);
 
                        if (events >= wakeup_events) {
@@ -7150,7 +7150,7 @@ void perf_event_exec(void)
 }
 
 struct remote_output {
-       struct ring_buffer      *rb;
+       struct perf_buffer      *rb;
        int                     err;
 };
 
@@ -7158,7 +7158,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 {
        struct perf_event *parent = event->parent;
        struct remote_output *ro = data;
-       struct ring_buffer *rb = ro->rb;
+       struct perf_buffer *rb = ro->rb;
        struct stop_event_data sd = {
                .event  = event,
        };
@@ -10998,7 +10998,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-       struct ring_buffer *rb = NULL;
+       struct perf_buffer *rb = NULL;
        int ret = -EINVAL;
 
        if (!output_event)
index 747d67f..f16f66b 100644 (file)
@@ -10,7 +10,7 @@
 
 #define RING_BUFFER_WRITABLE           0x01
 
-struct ring_buffer {
+struct perf_buffer {
        refcount_t                      refcount;
        struct rcu_head                 rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -58,17 +58,17 @@ struct ring_buffer {
        void                            *data_pages[0];
 };
 
-extern void rb_free(struct ring_buffer *rb);
+extern void rb_free(struct perf_buffer *rb);
 
 static inline void rb_free_rcu(struct rcu_head *rcu_head)
 {
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
 
-       rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+       rb = container_of(rcu_head, struct perf_buffer, rcu_head);
        rb_free(rb);
 }
 
-static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
+static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
 {
        if (!pause && rb->nr_pages)
                rb->paused = 0;
@@ -76,16 +76,16 @@ static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
                rb->paused = 1;
 }
 
-extern struct ring_buffer *
+extern struct perf_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
-extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
                        pgoff_t pgoff, int nr_pages, long watermark, int flags);
-extern void rb_free_aux(struct ring_buffer *rb);
-extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
-extern void ring_buffer_put(struct ring_buffer *rb);
+extern void rb_free_aux(struct perf_buffer *rb);
+extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
+extern void ring_buffer_put(struct perf_buffer *rb);
 
-static inline bool rb_has_aux(struct ring_buffer *rb)
+static inline bool rb_has_aux(struct perf_buffer *rb)
 {
        return !!rb->aux_nr_pages;
 }
@@ -94,7 +94,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
                          unsigned long size, u64 flags);
 
 extern struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
+perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
 
 #ifdef CONFIG_PERF_USE_VMALLOC
 /*
@@ -103,25 +103,25 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
  * Required for architectures that have d-cache aliasing issues.
  */
 
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
 {
        return rb->page_order;
 }
 
 #else
 
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
 {
        return 0;
 }
 #endif
 
-static inline unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct perf_buffer *rb)
 {
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
 
-static inline unsigned long perf_aux_size(struct ring_buffer *rb)
+static inline unsigned long perf_aux_size(struct perf_buffer *rb)
 {
        return rb->aux_nr_pages << PAGE_SHIFT;
 }
@@ -141,7 +141,7 @@ static inline unsigned long perf_aux_size(struct ring_buffer *rb)
                        buf += written;                                 \
                handle->size -= written;                                \
                if (!handle->size) {                                    \
-                       struct ring_buffer *rb = handle->rb;            \
+                       struct perf_buffer *rb = handle->rb;    \
                                                                        \
                        handle->page++;                                 \
                        handle->page &= rb->nr_pages - 1;               \
index 7ffd5c7..192b8ab 100644 (file)
@@ -35,7 +35,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
  */
 static void perf_output_get_handle(struct perf_output_handle *handle)
 {
-       struct ring_buffer *rb = handle->rb;
+       struct perf_buffer *rb = handle->rb;
 
        preempt_disable();
 
@@ -49,7 +49,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
 
 static void perf_output_put_handle(struct perf_output_handle *handle)
 {
-       struct ring_buffer *rb = handle->rb;
+       struct perf_buffer *rb = handle->rb;
        unsigned long head;
        unsigned int nest;
 
@@ -150,7 +150,7 @@ __perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
 {
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost, page_shift;
        struct {
@@ -301,7 +301,7 @@ void perf_output_end(struct perf_output_handle *handle)
 }
 
 static void
-ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
+ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
 {
        long max_size = perf_data_size(rb);
 
@@ -361,7 +361,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 {
        struct perf_event *output_event = event;
        unsigned long aux_head, aux_tail;
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        unsigned int nest;
 
        if (output_event->parent)
@@ -449,7 +449,7 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
-static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
 {
        if (rb->aux_overwrite)
                return false;
@@ -475,7 +475,7 @@ static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
 void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 {
        bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
-       struct ring_buffer *rb = handle->rb;
+       struct perf_buffer *rb = handle->rb;
        unsigned long aux_head;
 
        /* in overwrite mode, driver provides aux_head via handle */
@@ -532,7 +532,7 @@ EXPORT_SYMBOL_GPL(perf_aux_output_end);
  */
 int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
 {
-       struct ring_buffer *rb = handle->rb;
+       struct perf_buffer *rb = handle->rb;
 
        if (size > handle->size)
                return -ENOSPC;
@@ -569,8 +569,8 @@ long perf_output_copy_aux(struct perf_output_handle *aux_handle,
                          struct perf_output_handle *handle,
                          unsigned long from, unsigned long to)
 {
+       struct perf_buffer *rb = aux_handle->rb;
        unsigned long tocopy, remainder, len = 0;
-       struct ring_buffer *rb = aux_handle->rb;
        void *addr;
 
        from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
@@ -626,7 +626,7 @@ static struct page *rb_alloc_aux_page(int node, int order)
        return page;
 }
 
-static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+static void rb_free_aux_page(struct perf_buffer *rb, int idx)
 {
        struct page *page = virt_to_page(rb->aux_pages[idx]);
 
@@ -635,7 +635,7 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
        __free_page(page);
 }
 
-static void __rb_free_aux(struct ring_buffer *rb)
+static void __rb_free_aux(struct perf_buffer *rb)
 {
        int pg;
 
@@ -662,7 +662,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
        }
 }
 
-int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
 {
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
@@ -753,7 +753,7 @@ out:
        return ret;
 }
 
-void rb_free_aux(struct ring_buffer *rb)
+void rb_free_aux(struct perf_buffer *rb)
 {
        if (refcount_dec_and_test(&rb->aux_refcount))
                __rb_free_aux(rb);
@@ -766,7 +766,7 @@ void rb_free_aux(struct ring_buffer *rb)
  */
 
 static struct page *
-__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
        if (pgoff > rb->nr_pages)
                return NULL;
@@ -798,13 +798,13 @@ static void perf_mmap_free_page(void *addr)
        __free_page(page);
 }
 
-struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        unsigned long size;
        int i;
 
-       size = sizeof(struct ring_buffer);
+       size = sizeof(struct perf_buffer);
        size += nr_pages * sizeof(void *);
 
        if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
@@ -843,7 +843,7 @@ fail:
        return NULL;
 }
 
-void rb_free(struct ring_buffer *rb)
+void rb_free(struct perf_buffer *rb)
 {
        int i;
 
@@ -854,13 +854,13 @@ void rb_free(struct ring_buffer *rb)
 }
 
 #else
-static int data_page_nr(struct ring_buffer *rb)
+static int data_page_nr(struct perf_buffer *rb)
 {
        return rb->nr_pages << page_order(rb);
 }
 
 static struct page *
-__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
        /* The '>' counts in the user page. */
        if (pgoff > data_page_nr(rb))
@@ -878,11 +878,11 @@ static void perf_mmap_unmark_page(void *addr)
 
 static void rb_free_work(struct work_struct *work)
 {
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        void *base;
        int i, nr;
 
-       rb = container_of(work, struct ring_buffer, work);
+       rb = container_of(work, struct perf_buffer, work);
        nr = data_page_nr(rb);
 
        base = rb->user_page;
@@ -894,18 +894,18 @@ static void rb_free_work(struct work_struct *work)
        kfree(rb);
 }
 
-void rb_free(struct ring_buffer *rb)
+void rb_free(struct perf_buffer *rb)
 {
        schedule_work(&rb->work);
 }
 
-struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
-       struct ring_buffer *rb;
+       struct perf_buffer *rb;
        unsigned long size;
        void *all_buf;
 
-       size = sizeof(struct ring_buffer);
+       size = sizeof(struct perf_buffer);
        size += sizeof(void *);
 
        rb = kzalloc(size, GFP_KERNEL);
@@ -939,7 +939,7 @@ fail:
 #endif
 
 struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
        if (rb->aux_nr_pages) {
                /* above AUX space */
index cfc4f08..9e5783d 100644 (file)
@@ -176,20 +176,20 @@ static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
        return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations irq_affinity_proc_fops = {
-       .open           = irq_affinity_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = irq_affinity_proc_write,
+static const struct proc_ops irq_affinity_proc_ops = {
+       .proc_open      = irq_affinity_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = irq_affinity_proc_write,
 };
 
-static const struct file_operations irq_affinity_list_proc_fops = {
-       .open           = irq_affinity_list_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = irq_affinity_list_proc_write,
+static const struct proc_ops irq_affinity_list_proc_ops = {
+       .proc_open      = irq_affinity_list_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = irq_affinity_list_proc_write,
 };
 
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -246,12 +246,12 @@ static int default_affinity_open(struct inode *inode, struct file *file)
        return single_open(file, default_affinity_show, PDE_DATA(inode));
 }
 
-static const struct file_operations default_affinity_proc_fops = {
-       .open           = default_affinity_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = default_affinity_write,
+static const struct proc_ops default_affinity_proc_ops = {
+       .proc_open      = default_affinity_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = default_affinity_write,
 };
 
 static int irq_node_proc_show(struct seq_file *m, void *v)
@@ -342,7 +342,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 #ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
        proc_create_data("smp_affinity", 0644, desc->dir,
-                        &irq_affinity_proc_fops, irqp);
+                        &irq_affinity_proc_ops, irqp);
 
        /* create /proc/irq/<irq>/affinity_hint */
        proc_create_single_data("affinity_hint", 0444, desc->dir,
@@ -350,7 +350,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 
        /* create /proc/irq/<irq>/smp_affinity_list */
        proc_create_data("smp_affinity_list", 0644, desc->dir,
-                        &irq_affinity_list_proc_fops, irqp);
+                        &irq_affinity_list_proc_ops, irqp);
 
        proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
                        irqp);
@@ -401,7 +401,7 @@ static void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
        proc_create("irq/default_smp_affinity", 0644, NULL,
-                   &default_affinity_proc_fops);
+                   &default_affinity_proc_ops);
 #endif
 }
 
index 136ce04..d812b90 100644 (file)
@@ -698,16 +698,16 @@ const char *kdb_walk_kallsyms(loff_t *pos)
 }
 #endif /* CONFIG_KGDB_KDB */
 
-static const struct file_operations kallsyms_operations = {
-       .open = kallsyms_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = seq_release_private,
+static const struct proc_ops kallsyms_proc_ops = {
+       .proc_open      = kallsyms_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release_private,
 };
 
 static int __init kallsyms_init(void)
 {
-       proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
+       proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
        return 0;
 }
 device_initcall(kallsyms_init);
index e3acead..8d1c158 100644 (file)
@@ -255,17 +255,17 @@ static int lstats_open(struct inode *inode, struct file *filp)
        return single_open(filp, lstats_show, NULL);
 }
 
-static const struct file_operations lstats_fops = {
-       .open           = lstats_open,
-       .read           = seq_read,
-       .write          = lstats_write,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct proc_ops lstats_proc_ops = {
+       .proc_open      = lstats_open,
+       .proc_read      = seq_read,
+       .proc_write     = lstats_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 static int __init init_lstats_procfs(void)
 {
-       proc_create("latency_stats", 0644, NULL, &lstats_fops);
+       proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
        return 0;
 }
 
index 9bb6d24..231684c 100644 (file)
@@ -643,12 +643,12 @@ static int lock_stat_release(struct inode *inode, struct file *file)
        return seq_release(inode, file);
 }
 
-static const struct file_operations proc_lock_stat_operations = {
-       .open           = lock_stat_open,
-       .write          = lock_stat_write,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = lock_stat_release,
+static const struct proc_ops lock_stat_proc_ops = {
+       .proc_open      = lock_stat_open,
+       .proc_write     = lock_stat_write,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = lock_stat_release,
 };
 #endif /* CONFIG_LOCK_STAT */
 
@@ -660,8 +660,7 @@ static int __init lockdep_proc_init(void)
 #endif
        proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
 #ifdef CONFIG_LOCK_STAT
-       proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
-                   &proc_lock_stat_operations);
+       proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, &lock_stat_proc_ops);
 #endif
 
        return 0;
index 90ec5ab..33569a0 100644 (file)
@@ -4354,16 +4354,16 @@ static int modules_open(struct inode *inode, struct file *file)
        return err;
 }
 
-static const struct file_operations proc_modules_operations = {
-       .open           = modules_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops modules_proc_ops = {
+       .proc_open      = modules_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 static int __init proc_modules_init(void)
 {
-       proc_create("modules", 0, NULL, &proc_modules_operations);
+       proc_create("modules", 0, NULL, &modules_proc_ops);
        return 0;
 }
 module_init(proc_modules_init);
index 4b144b0..6f69a41 100644 (file)
@@ -442,18 +442,18 @@ static ssize_t prof_cpu_mask_proc_write(struct file *file,
        return err;
 }
 
-static const struct file_operations prof_cpu_mask_proc_fops = {
-       .open           = prof_cpu_mask_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = prof_cpu_mask_proc_write,
+static const struct proc_ops prof_cpu_mask_proc_ops = {
+       .proc_open      = prof_cpu_mask_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
+       .proc_write     = prof_cpu_mask_proc_write,
 };
 
 void create_prof_cpu_mask(void)
 {
        /* create /proc/irq/prof_cpu_mask */
-       proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
+       proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_ops);
 }
 
 /*
@@ -517,10 +517,10 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
        return count;
 }
 
-static const struct file_operations proc_profile_operations = {
-       .read           = read_profile,
-       .write          = write_profile,
-       .llseek         = default_llseek,
+static const struct proc_ops profile_proc_ops = {
+       .proc_read      = read_profile,
+       .proc_write     = write_profile,
+       .proc_lseek     = default_llseek,
 };
 
 int __ref create_proc_profile(void)
@@ -548,7 +548,7 @@ int __ref create_proc_profile(void)
        err = 0;
 #endif
        entry = proc_create("profile", S_IWUSR | S_IRUGO,
-                           NULL, &proc_profile_operations);
+                           NULL, &profile_proc_ops);
        if (!entry)
                goto err_state_onl;
        proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
index db7b50b..ac4bd0c 100644 (file)
@@ -1251,40 +1251,40 @@ static int psi_fop_release(struct inode *inode, struct file *file)
        return single_release(inode, file);
 }
 
-static const struct file_operations psi_io_fops = {
-       .open           = psi_io_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = psi_io_write,
-       .poll           = psi_fop_poll,
-       .release        = psi_fop_release,
+static const struct proc_ops psi_io_proc_ops = {
+       .proc_open      = psi_io_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = psi_io_write,
+       .proc_poll      = psi_fop_poll,
+       .proc_release   = psi_fop_release,
 };
 
-static const struct file_operations psi_memory_fops = {
-       .open           = psi_memory_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = psi_memory_write,
-       .poll           = psi_fop_poll,
-       .release        = psi_fop_release,
+static const struct proc_ops psi_memory_proc_ops = {
+       .proc_open      = psi_memory_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = psi_memory_write,
+       .proc_poll      = psi_fop_poll,
+       .proc_release   = psi_fop_release,
 };
 
-static const struct file_operations psi_cpu_fops = {
-       .open           = psi_cpu_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .write          = psi_cpu_write,
-       .poll           = psi_fop_poll,
-       .release        = psi_fop_release,
+static const struct proc_ops psi_cpu_proc_ops = {
+       .proc_open      = psi_cpu_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = psi_cpu_write,
+       .proc_poll      = psi_fop_poll,
+       .proc_release   = psi_fop_release,
 };
 
 static int __init psi_proc_init(void)
 {
        if (psi_enable) {
                proc_mkdir("pressure", NULL);
-               proc_create("pressure/io", 0, NULL, &psi_io_fops);
-               proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
-               proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
+               proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
+               proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
+               proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
        }
        return 0;
 }
index 25a0fcf..91e8851 100644 (file)
@@ -141,6 +141,15 @@ menuconfig FTRACE
 
 if FTRACE
 
+config BOOTTIME_TRACING
+       bool "Boot-time Tracing support"
+       depends on BOOT_CONFIG && TRACING
+       default y
+       help
+         Enable developers to set up the ftrace subsystem via a supplemental
+         kernel command line at boot time, for debugging (tracing) driver
+         initialization and the boot process.
+
 config FUNCTION_TRACER
        bool "Kernel Function Tracer"
        depends on HAVE_FUNCTION_TRACER
@@ -172,6 +181,77 @@ config FUNCTION_GRAPH_TRACER
          the return value. This is done by setting the current return
          address on the current task structure into a stack of calls.
 
+config DYNAMIC_FTRACE
+       bool "enable/disable function tracing dynamically"
+       depends on FUNCTION_TRACER
+       depends on HAVE_DYNAMIC_FTRACE
+       default y
+       help
+         This option will modify all the calls to function tracing
+         dynamically (will patch them out of the binary image and
+         replace them with a No-Op instruction) on boot up. During
+         compile time, a table is made of all the locations that ftrace
+         can function trace, and this table is linked into the kernel
+         image. When this is enabled, functions can be individually
+         enabled, and the functions not enabled will not affect
+         performance of the system.
+
+         See the files in /sys/kernel/debug/tracing:
+           available_filter_functions
+           set_ftrace_filter
+           set_ftrace_notrace
+
+         This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
+         otherwise has native performance as long as no tracing is active.
+
+config DYNAMIC_FTRACE_WITH_REGS
+       def_bool y
+       depends on DYNAMIC_FTRACE
+       depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+       def_bool y
+       depends on DYNAMIC_FTRACE
+       depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+
+config FUNCTION_PROFILER
+       bool "Kernel function profiler"
+       depends on FUNCTION_TRACER
+       default n
+       help
+         This option enables the kernel function profiler. A file is created
+         in debugfs called function_profile_enabled which defaults to zero.
+         When a 1 is echoed into this file profiling begins, and when a
+         zero is entered, profiling stops. A "functions" file is created in
+         the trace_stat directory; this file shows the list of functions that
+         have been hit and their counters.
+
+         If in doubt, say N.
+
+config STACK_TRACER
+       bool "Trace max stack"
+       depends on HAVE_FUNCTION_TRACER
+       select FUNCTION_TRACER
+       select STACKTRACE
+       select KALLSYMS
+       help
+         This special tracer records the maximum stack footprint of the
+         kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
+
+         This tracer works by hooking into every function call that the
+         kernel executes, and keeping a maximum stack depth value and
+         stack-trace saved.  If this is configured with DYNAMIC_FTRACE
+         then it will not have any overhead while the stack tracer
+         is disabled.
+
+         To enable the stack tracer on bootup, pass in 'stacktrace'
+         on the kernel command line.
+
+         The stack tracer can also be enabled or disabled via the
+         sysctl kernel.stack_tracer_enabled
+
+         Say N if unsure.
+
 config TRACE_PREEMPT_TOGGLE
        bool
        help
@@ -282,6 +362,19 @@ config HWLAT_TRACER
         file. Every time a latency is greater than tracing_thresh, it will
         be recorded into the ring buffer.
 
+config MMIOTRACE
+       bool "Memory mapped IO tracing"
+       depends on HAVE_MMIOTRACE_SUPPORT && PCI
+       select GENERIC_TRACER
+       help
+         Mmiotrace traces Memory Mapped I/O access and is meant for
+         debugging and reverse engineering. It is called from the ioremap
+         implementation and works via page faults. Tracing is disabled by
+         default and can be enabled at run-time.
+
+         See Documentation/trace/mmiotrace.rst.
+         If you are not helping to develop drivers, say N.
+
 config ENABLE_DEFAULT_TRACERS
        bool "Trace process context switches and events"
        depends on !GENERIC_TRACER
@@ -410,30 +503,6 @@ config BRANCH_TRACER
 
          Say N if unsure.
 
-config STACK_TRACER
-       bool "Trace max stack"
-       depends on HAVE_FUNCTION_TRACER
-       select FUNCTION_TRACER
-       select STACKTRACE
-       select KALLSYMS
-       help
-         This special tracer records the maximum stack footprint of the
-         kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
-
-         This tracer works by hooking into every function call that the
-         kernel executes, and keeping a maximum stack depth value and
-         stack-trace saved.  If this is configured with DYNAMIC_FTRACE
-         then it will not have any overhead while the stack tracer
-         is disabled.
-
-         To enable the stack tracer on bootup, pass in 'stacktrace'
-         on the kernel command line.
-
-         The stack tracer can also be enabled or disabled via the
-         sysctl kernel.stack_tracer_enabled
-
-         Say N if unsure.
-
 config BLK_DEV_IO_TRACE
        bool "Support for tracing block IO actions"
        depends on SYSFS
@@ -531,53 +600,6 @@ config DYNAMIC_EVENTS
 config PROBE_EVENTS
        def_bool n
 
-config DYNAMIC_FTRACE
-       bool "enable/disable function tracing dynamically"
-       depends on FUNCTION_TRACER
-       depends on HAVE_DYNAMIC_FTRACE
-       default y
-       help
-         This option will modify all the calls to function tracing
-         dynamically (will patch them out of the binary image and
-         replace them with a No-Op instruction) on boot up. During
-         compile time, a table is made of all the locations that ftrace
-         can function trace, and this table is linked into the kernel
-         image. When this is enabled, functions can be individually
-         enabled, and the functions not enabled will not affect
-         performance of the system.
-
-         See the files in /sys/kernel/debug/tracing:
-           available_filter_functions
-           set_ftrace_filter
-           set_ftrace_notrace
-
-         This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
-         otherwise has native performance as long as no tracing is active.
-
-config DYNAMIC_FTRACE_WITH_REGS
-       def_bool y
-       depends on DYNAMIC_FTRACE
-       depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
-
-config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-       def_bool y
-       depends on DYNAMIC_FTRACE
-       depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-
-config FUNCTION_PROFILER
-       bool "Kernel function profiler"
-       depends on FUNCTION_TRACER
-       default n
-       help
-         This option enables the kernel function profiler. A file is created
-         in debugfs called function_profile_enabled which defaults to zero.
-         When a 1 is echoed into this file profiling begins, and when a
-         zero is entered, profiling stops. A "functions" file is created in
-         the trace_stat directory; this file shows the list of functions that
-         have been hit and their counters.
-
-         If in doubt, say N.
-
 config BPF_KPROBE_OVERRIDE
        bool "Enable BPF programs to override a kprobed function"
        depends on BPF_EVENTS
@@ -592,54 +614,6 @@ config FTRACE_MCOUNT_RECORD
        depends on DYNAMIC_FTRACE
        depends on HAVE_FTRACE_MCOUNT_RECORD
 
-config FTRACE_SELFTEST
-       bool
-
-config FTRACE_STARTUP_TEST
-       bool "Perform a startup test on ftrace"
-       depends on GENERIC_TRACER
-       select FTRACE_SELFTEST
-       help
-         This option performs a series of startup tests on ftrace. On bootup
-         a series of tests are made to verify that the tracer is
-         functioning properly. It will do tests on all the configured
-         tracers of ftrace.
-
-config EVENT_TRACE_STARTUP_TEST
-       bool "Run selftest on trace events"
-       depends on FTRACE_STARTUP_TEST
-       default y
-       help
-         This option performs a test on all trace events in the system.
-         It basically just enables each event and runs some code that
-         will trigger events (not necessarily the event it enables)
-         This may take some time run as there are a lot of events.
-
-config EVENT_TRACE_TEST_SYSCALLS
-       bool "Run selftest on syscall events"
-       depends on EVENT_TRACE_STARTUP_TEST
-       help
-        This option will also enable testing every syscall event.
-        It only enables the event and disables it and runs various loads
-        with the event enabled. This adds a bit more time for kernel boot
-        up since it runs this on every system call defined.
-
-        TBD - enable a way to actually call the syscalls as we test their
-              events
-
-config MMIOTRACE
-       bool "Memory mapped IO tracing"
-       depends on HAVE_MMIOTRACE_SUPPORT && PCI
-       select GENERIC_TRACER
-       help
-         Mmiotrace traces Memory Mapped I/O access and is meant for
-         debugging and reverse engineering. It is called from the ioremap
-         implementation and works via page faults. Tracing is disabled by
-         default and can be enabled at run-time.
-
-         See Documentation/trace/mmiotrace.rst.
-         If you are not helping to develop drivers, say N.
-
 config TRACING_MAP
        bool
        depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -680,16 +654,6 @@ config TRACE_EVENT_INJECT
 
          If unsure, say N.
 
-config MMIOTRACE_TEST
-       tristate "Test module for mmiotrace"
-       depends on MMIOTRACE && m
-       help
-         This is a dumb module for testing mmiotrace. It is very dangerous
-         as it will write garbage to IO memory starting at a given address.
-         However, it should be safe to use on e.g. unused portion of VRAM.
-
-         Say N, unless you absolutely know what you are doing.
-
 config TRACEPOINT_BENCHMARK
        bool "Add tracepoint that benchmarks tracepoints"
        help
@@ -736,6 +700,81 @@ config RING_BUFFER_BENCHMARK
 
          If unsure, say N.
 
+config TRACE_EVAL_MAP_FILE
+       bool "Show eval mappings for trace events"
+       depends on TRACING
+       help
+       The "print fmt" of the trace events will show the enum/sizeof names
+       instead of their values. This can cause problems for user space tools
+       that use this string to parse the raw data as user space does not know
+       how to convert the string to its value.
+
+       To fix this, there's a special macro in the kernel that can be used
+       to convert an enum/sizeof into its value. If this macro is used, then
+       the print fmt strings will be converted to their values.
+
+       If something does not get converted properly, this option can be
+       used to show what enums/sizeof the kernel tried to convert.
+
+       This option is for debugging the conversions. A file is created
+       in the tracing directory called "eval_map" that will show the
+       names matched with their values and what trace event system they
+       belong to.
+
+       Normally, the mapping of the strings to values will be freed after
+       boot up or module load. With this option, they will not be freed, as
+       they are needed for the "eval_map" file. Enabling this option will
+       increase the memory footprint of the running kernel.
+
+       If unsure, say N.
+
+config GCOV_PROFILE_FTRACE
+       bool "Enable GCOV profiling on ftrace subsystem"
+       depends on GCOV_KERNEL
+       help
+         Enable GCOV profiling on ftrace subsystem for checking
+         which functions/lines are tested.
+
+         If unsure, say N.
+
+         Note that on a kernel compiled with this config, ftrace will
+         run significantly slower.
+
+config FTRACE_SELFTEST
+       bool
+
+config FTRACE_STARTUP_TEST
+       bool "Perform a startup test on ftrace"
+       depends on GENERIC_TRACER
+       select FTRACE_SELFTEST
+       help
+         This option performs a series of startup tests on ftrace. On bootup
+         a series of tests are made to verify that the tracer is
+         functioning properly. It will do tests on all the configured
+         tracers of ftrace.
+
+config EVENT_TRACE_STARTUP_TEST
+       bool "Run selftest on trace events"
+       depends on FTRACE_STARTUP_TEST
+       default y
+       help
+         This option performs a test on all trace events in the system.
+         It basically just enables each event and runs some code that
+         will trigger events (not necessarily the event it enables).
+         This may take some time to run as there are a lot of events.
+
+config EVENT_TRACE_TEST_SYSCALLS
+       bool "Run selftest on syscall events"
+       depends on EVENT_TRACE_STARTUP_TEST
+       help
+        This option will also enable testing every syscall event.
+        It only enables the event and disables it and runs various loads
+        with the event enabled. This adds a bit more time for kernel boot
+        up since it runs this on every system call defined.
+
+        TBD - enable a way to actually call the syscalls as we test their
+              events
+
 config RING_BUFFER_STARTUP_TEST
        bool "Ring buffer startup self test"
        depends on RING_BUFFER
@@ -759,8 +798,18 @@ config RING_BUFFER_STARTUP_TEST
 
         If unsure, say N
 
+config MMIOTRACE_TEST
+       tristate "Test module for mmiotrace"
+       depends on MMIOTRACE && m
+       help
+         This is a dumb module for testing mmiotrace. It is very dangerous
+         as it will write garbage to IO memory starting at a given address.
+         However, it should be safe to use on e.g. unused portion of VRAM.
+
+         Say N, unless you absolutely know what you are doing.
+
 config PREEMPTIRQ_DELAY_TEST
-       tristate "Preempt / IRQ disable delay thread to test latency tracers"
+       tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
        depends on m
        help
          Select this option to build a test module that can help test latency
@@ -774,45 +823,30 @@ config PREEMPTIRQ_DELAY_TEST
 
          If unsure, say N
 
-config TRACE_EVAL_MAP_FILE
-       bool "Show eval mappings for trace events"
-       depends on TRACING
-       help
-       The "print fmt" of the trace events will show the enum/sizeof names
-       instead of their values. This can cause problems for user space tools
-       that use this string to parse the raw data as user space does not know
-       how to convert the string to its value.
-
-       To fix this, there's a special macro in the kernel that can be used
-       to convert an enum/sizeof into its value. If this macro is used, then
-       the print fmt strings will be converted to their values.
-
-       If something does not get converted properly, this option can be
-       used to show what enums/sizeof the kernel tried to convert.
-
-       This option is for debugging the conversions. A file is created
-       in the tracing directory called "eval_map" that will show the
-       names matched with their values and what trace event system they
-       belong too.
+config SYNTH_EVENT_GEN_TEST
+       tristate "Test module for in-kernel synthetic event generation"
+       depends on HIST_TRIGGERS
+       help
+          This option creates a test module to check the base
+          functionality of in-kernel synthetic event definition and
+          generation.
 
-       Normally, the mapping of the strings to values will be freed after
-       boot up or module load. With this option, they will not be freed, as
-       they are needed for the "eval_map" file. Enabling this option will
-       increase the memory footprint of the running kernel.
+          To test, insert the module, and then check the trace buffer
+         for the generated sample events.
 
-       If unsure, say N.
+         If unsure, say N.
 
-config GCOV_PROFILE_FTRACE
-       bool "Enable GCOV profiling on ftrace subsystem"
-       depends on GCOV_KERNEL
+config KPROBE_EVENT_GEN_TEST
+       tristate "Test module for in-kernel kprobe event generation"
+       depends on KPROBE_EVENTS
        help
-         Enable GCOV profiling on ftrace subsystem for checking
-         which functions/lines are tested.
+          This option creates a test module to check the base
+          functionality of in-kernel kprobe event definition.
 
-         If unsure, say N.
+          To test, insert the module, and then check the trace buffer
+         for the generated kprobe events.
 
-         Note that on a kernel compiled with this config, ftrace will
-         run significantly slower.
+         If unsure, say N.
 
 endif # FTRACE
 
index 0e63db6..f9dcd19 100644 (file)
@@ -44,6 +44,8 @@ obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
+obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o
+obj-$(CONFIG_KPROBE_EVENT_GEN_TEST) += kprobe_event_gen_test.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o
@@ -83,6 +85,7 @@ endif
 obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
+obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 475e294..0735ae8 100644 (file)
@@ -68,14 +68,14 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 {
        struct blk_io_trace *t;
        struct ring_buffer_event *event = NULL;
-       struct ring_buffer *buffer = NULL;
+       struct trace_buffer *buffer = NULL;
        int pc = 0;
        int cpu = smp_processor_id();
        bool blk_tracer = blk_tracer_enabled;
        ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 
        if (blk_tracer) {
-               buffer = blk_tr->trace_buffer.buffer;
+               buffer = blk_tr->array_buffer.buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + len + cgid_len,
@@ -215,7 +215,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 {
        struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
-       struct ring_buffer *buffer = NULL;
+       struct trace_buffer *buffer = NULL;
        struct blk_io_trace *t;
        unsigned long flags = 0;
        unsigned long *sequence;
@@ -248,7 +248,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        if (blk_tracer) {
                tracing_record_cmdline(current);
 
-               buffer = blk_tr->trace_buffer.buffer;
+               buffer = blk_tr->array_buffer.buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + pdu_len + cgid_len,
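
The buffer renames running through these hunks involve two types whose old names were confusingly close: struct ring_buffer (the ring buffer proper) becomes struct trace_buffer, while the per-trace_array wrapper formerly named struct trace_buffer becomes struct array_buffer, which is why blk_tr->trace_buffer.buffer turns into blk_tr->array_buffer.buffer above. A rough sketch of the resulting relationship, trimmed to the members these hunks actually touch:

struct trace_buffer;				/* the ring buffer itself; was "struct ring_buffer" */

struct array_buffer {				/* per-trace_array wrapper; was "struct trace_buffer" */
	struct trace_buffer		*buffer;	/* e.g. blk_tr->array_buffer.buffer */
	struct trace_array_cpu __percpu	*data;		/* e.g. tr->array_buffer.data->ftrace_ignore_pid */
	/* remaining members are unaffected by the rename */
};
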
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9bf1f2c..3f7ee10 100644 (file)
@@ -62,8 +62,6 @@
        })
 
 /* hash bits for specific function selection */
-#define FTRACE_HASH_BITS 7
-#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
@@ -146,7 +144,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 {
        struct trace_array *tr = op->private;
 
-       if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
+       if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
                return;
 
        op->saved_func(ip, parent_ip, op, regs);
@@ -1103,9 +1101,6 @@ struct ftrace_page {
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
-/* estimate from running different kernels */
-#define NR_TO_INIT             10000
-
 static struct ftrace_page      *ftrace_pages_start;
 static struct ftrace_page      *ftrace_pages;
 
@@ -5464,7 +5459,7 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
        struct ftrace_hash *hash;
 
        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
-       if (WARN_ON(!hash))
+       if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
                return;
 
        while (buf) {
@@ -5596,8 +5591,8 @@ static const struct file_operations ftrace_notrace_fops = {
 
 static DEFINE_MUTEX(graph_lock);
 
-struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
-struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
 
 enum graph_filter_type {
        GRAPH_FILTER_NOTRACE    = 0,
@@ -5872,8 +5867,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&graph_lock);
 
-               /* Wait till all users are no longer using the old hash */
-               synchronize_rcu();
+               /*
+                * We need to do a hard force of sched synchronization.
+                * This is because we use preempt_disable() to do RCU, but
+                * the function tracers can be called where RCU is not watching
+                * (like before user_exit()). We can not rely on the RCU
+                * infrastructure to do the synchronization, thus we must do it
+                * ourselves.
+                */
+               schedule_on_each_cpu(ftrace_sync);
 
                free_ftrace_hash(old_hash);
        }
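
The new comment describes the writer side of a synchronization pattern whose reader side is not visible in this hunk. A minimal sketch of both sides, with made-up names (example_hash, example_sync, example_reader, example_update) standing in for the ftrace internals, assuming readers rely on disabled preemption rather than rcu_read_lock():

#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_hash { unsigned long ip; };	/* hypothetical payload */

static struct example_hash __rcu *cur_hash;
static DEFINE_MUTEX(example_lock);		/* serializes updaters */

static void example_sync(struct work_struct *work)
{
	/* Empty on purpose: running this on every CPU proves each CPU scheduled. */
}

/* Read side: may run where RCU is not watching, so it relies on disabled
 * preemption instead of rcu_read_lock().
 */
static bool example_reader(unsigned long ip)
{
	struct example_hash *hash;
	bool hit = false;

	preempt_disable_notrace();
	hash = rcu_dereference_protected(cur_hash, !preemptible());
	if (hash)
		hit = (hash->ip == ip);
	preempt_enable_notrace();

	return hit;
}

/* Update side: the RCU grace-period machinery cannot be relied on for the
 * readers above, so wait for every CPU to schedule before freeing the old copy.
 */
static void example_update(struct example_hash *new_hash)
{
	struct example_hash *old;

	mutex_lock(&example_lock);
	old = rcu_dereference_protected(cur_hash, lockdep_is_held(&example_lock));
	rcu_assign_pointer(cur_hash, new_hash);
	schedule_on_each_cpu(example_sync);	/* the "hard force" described above */
	kfree(old);
	mutex_unlock(&example_lock);
}
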
@@ -6596,7 +6598,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list,
 
        func = kmalloc(sizeof(*func), GFP_KERNEL);
        if (!func) {
-               WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
+               MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
                return;
        }
 
@@ -6922,7 +6924,7 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
 
        pid_list = rcu_dereference_sched(tr->function_pids);
 
-       this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+       this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
                       trace_ignore_this_task(pid_list, next));
 }
 
@@ -6976,7 +6978,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
        unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
 
        for_each_possible_cpu(cpu)
-               per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
+               per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
 
        rcu_assign_pointer(tr->function_pids, NULL);
 
@@ -7031,9 +7033,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
        struct trace_array *tr = m->private;
        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
 
-       if (v == FTRACE_NO_PIDS)
+       if (v == FTRACE_NO_PIDS) {
+               (*pos)++;
                return NULL;
-
+       }
        return trace_pid_next(pid_list, v, pos);
 }
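
The (*pos)++ added here follows the usual seq_file contract: a ->next() callback should advance the position even on the call that returns NULL, otherwise seq_read() can come back with a stale *pos and loop or duplicate output. A minimal sketch of that rule, with hypothetical names (example_items, example_seq_next):

#include <linux/kernel.h>
#include <linux/seq_file.h>

static int example_items[] = { 1, 2, 3 };	/* hypothetical data set */

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;				/* always consume the position */
	if (*pos >= (loff_t)ARRAY_SIZE(example_items))
		return NULL;			/* NULL ends the iteration */
	return &example_items[*pos];
}
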
 
@@ -7100,7 +7103,7 @@ static void ignore_task_cpu(void *data)
        pid_list = rcu_dereference_protected(tr->function_pids,
                                             mutex_is_locked(&ftrace_lock));
 
-       this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+       this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
                       trace_ignore_this_task(pid_list, current));
 }
 
diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
new file mode 100644 (file)
index 0000000..18b0f1c
--- /dev/null
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module for in-kernel kprobe event creation and generation.
+ *
+ * Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
+ */
+
+#include <linux/module.h>
+#include <linux/trace_events.h>
+
+/*
+ * This module is a simple test of basic functionality for in-kernel
+ * kprobe/kretprobe event creation.  The first test uses
+ * kprobe_event_gen_cmd_start(), kprobe_event_add_fields() and
+ * kprobe_event_gen_cmd_end() to create a kprobe event, which is then
+ * enabled in order to generate trace output.  The second creates a
+ * kretprobe event using kretprobe_event_gen_cmd_start() and
+ * kretprobe_event_gen_cmd_end(), and is also then enabled.
+ *
+ * To test, select CONFIG_KPROBE_EVENT_GEN_TEST and build the module.
+ * Then:
+ *
+ * # insmod kernel/trace/kprobe_event_gen_test.ko
+ * # cat /sys/kernel/debug/tracing/trace
+ *
+ * You should see many instances of the "gen_kprobe_test" and
+ * "gen_kretprobe_test" events in the trace buffer.
+ *
+ * To remove the events, remove the module:
+ *
+ * # rmmod kprobe_event_gen_test
+ *
+ */
+
+static struct trace_event_file *gen_kprobe_test;
+static struct trace_event_file *gen_kretprobe_test;
+
+/*
+ * Test to make sure we can create a kprobe event, then add more
+ * fields.
+ */
+static int __init test_gen_kprobe_cmd(void)
+{
+       struct dynevent_cmd cmd;
+       char *buf;
+       int ret;
+
+       /* Create a buffer to hold the generated command */
+       buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* Before generating the command, initialize the cmd object */
+       kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+       /*
+        * Define the gen_kprobe_test event with the first 2 kprobe
+        * fields.
+        */
+       ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
+                                        "do_sys_open",
+                                        "dfd=%ax", "filename=%dx");
+       if (ret)
+               goto free;
+
+       /* Use kprobe_event_add_fields to add the rest of the fields */
+
+       ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
+       if (ret)
+               goto free;
+
+       /*
+        * This actually creates the event.
+        */
+       ret = kprobe_event_gen_cmd_end(&cmd);
+       if (ret)
+               goto free;
+
+       /*
+        * Now get the gen_kprobe_test event file.  We need to prevent
+        * the instance and event from disappearing from underneath
+        * us, which trace_get_event_file() does (though in this case
+        * we're using the top-level instance which never goes away).
+        */
+       gen_kprobe_test = trace_get_event_file(NULL, "kprobes",
+                                              "gen_kprobe_test");
+       if (IS_ERR(gen_kprobe_test)) {
+               ret = PTR_ERR(gen_kprobe_test);
+               goto delete;
+       }
+
+       /* Enable the event or you won't see anything */
+       ret = trace_array_set_clr_event(gen_kprobe_test->tr,
+                                       "kprobes", "gen_kprobe_test", true);
+       if (ret) {
+               trace_put_event_file(gen_kprobe_test);
+               goto delete;
+       }
+ out:
+       return ret;
+ delete:
+       /* We got an error after creating the event, delete it */
+       ret = kprobe_event_delete("gen_kprobe_test");
+ free:
+       kfree(buf);
+
+       goto out;
+}
+
+/*
+ * Test to make sure we can create a kretprobe event.
+ */
+static int __init test_gen_kretprobe_cmd(void)
+{
+       struct dynevent_cmd cmd;
+       char *buf;
+       int ret;
+
+       /* Create a buffer to hold the generated command */
+       buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* Before generating the command, initialize the cmd object */
+       kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+       /*
+        * Define the kretprobe event.
+        */
+       ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
+                                           "do_sys_open",
+                                           "$retval");
+       if (ret)
+               goto free;
+
+       /*
+        * This actually creates the event.
+        */
+       ret = kretprobe_event_gen_cmd_end(&cmd);
+       if (ret)
+               goto free;
+
+       /*
+        * Now get the gen_kretprobe_test event file.  We need to
+        * prevent the instance and event from disappearing from
+        * underneath us, which trace_get_event_file() does (though in
+        * this case we're using the top-level instance which never
+        * goes away).
+        */
+       gen_kretprobe_test = trace_get_event_file(NULL, "kprobes",
+                                                 "gen_kretprobe_test");
+       if (IS_ERR(gen_kretprobe_test)) {
+               ret = PTR_ERR(gen_kretprobe_test);
+               goto delete;
+       }
+
+       /* Enable the event or you won't see anything */
+       ret = trace_array_set_clr_event(gen_kretprobe_test->tr,
+                                       "kprobes", "gen_kretprobe_test", true);
+       if (ret) {
+               trace_put_event_file(gen_kretprobe_test);
+               goto delete;
+       }
+ out:
+       return ret;
+ delete:
+       /* We got an error after creating the event, delete it */
+       ret = kprobe_event_delete("gen_kretprobe_test");
+ free:
+       kfree(buf);
+
+       goto out;
+}
+
+static int __init kprobe_event_gen_test_init(void)
+{
+       int ret;
+
+       ret = test_gen_kprobe_cmd();
+       if (ret)
+               return ret;
+
+       ret = test_gen_kretprobe_cmd();
+       if (ret) {
+               WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+                                                 "kprobes",
+                                                 "gen_kretprobe_test", false));
+               trace_put_event_file(gen_kretprobe_test);
+               WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
+       }
+
+       return ret;
+}
+
+static void __exit kprobe_event_gen_test_exit(void)
+{
+       /* Disable the event or you can't remove it */
+       WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+                                         "kprobes",
+                                         "gen_kprobe_test", false));
+
+       /* Now give the file and instance back */
+       trace_put_event_file(gen_kprobe_test);
+
+       /* Now unregister and free the event */
+       WARN_ON(kprobe_event_delete("gen_kprobe_test"));
+
+       /* Disable the event or you can't remove it */
+       WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+                                         "kprobes",
+                                         "gen_kretprobe_test", false));
+
+       /* Now give the file and instance back */
+       trace_put_event_file(gen_kretprobe_test);
+
+       /* Now unregister and free the event */
+       WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
+}
+
+module_init(kprobe_event_gen_test_init)
+module_exit(kprobe_event_gen_test_exit)
+
+MODULE_AUTHOR("Tom Zanussi");
+MODULE_DESCRIPTION("kprobe event generation test");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f65537..61f0e92 100644 (file)
@@ -300,8 +300,6 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
 /* Missed count stored at end */
 #define RB_MISSED_STORED       (1 << 30)
 
-#define RB_MISSED_FLAGS                (RB_MISSED_EVENTS|RB_MISSED_STORED)
-
 struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
@@ -443,7 +441,7 @@ enum {
 struct ring_buffer_per_cpu {
        int                             cpu;
        atomic_t                        record_disabled;
-       struct ring_buffer              *buffer;
+       struct trace_buffer     *buffer;
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
@@ -482,7 +480,7 @@ struct ring_buffer_per_cpu {
        struct rb_irq_work              irq_work;
 };
 
-struct ring_buffer {
+struct trace_buffer {
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
@@ -518,7 +516,7 @@ struct ring_buffer_iter {
  *
  * Returns the number of pages used by a per_cpu buffer of the ring buffer.
  */
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 {
        return buffer->buffers[cpu]->nr_pages;
 }
@@ -530,7 +528,7 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
  *
  * Returns the number of pages that have content in the ring buffer.
  */
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 {
        size_t read;
        size_t cnt;
@@ -573,7 +571,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 {
        struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
        DEFINE_WAIT(wait);
@@ -684,7 +682,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
  * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
  * zero otherwise.
  */
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
@@ -742,13 +740,13 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
-static inline u64 rb_time_stamp(struct ring_buffer *buffer)
+static inline u64 rb_time_stamp(struct trace_buffer *buffer)
 {
        /* shift to debug/test normalization and TIME_EXTENTS */
        return buffer->clock() << DEBUG_SHIFT;
 }
 
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
 {
        u64 time;
 
@@ -760,7 +758,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
                                      int cpu, u64 *ts)
 {
        /* Just stupid testing the normalize function and deltas */
@@ -1283,7 +1281,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
@@ -1368,16 +1366,17 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  * __ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
+ * @key: ring buffer reader_lock_key.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
  * flag. This flag means that the buffer will overwrite old data
  * when the buffer wraps. If this flag is not set, the buffer will
  * drop data when the tail hits the head.
  */
-struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
 {
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        long nr_pages;
        int bsize;
        int cpu;
@@ -1447,7 +1446,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
  * @buffer: the buffer to free.
  */
 void
-ring_buffer_free(struct ring_buffer *buffer)
+ring_buffer_free(struct trace_buffer *buffer)
 {
        int cpu;
 
@@ -1463,18 +1462,18 @@ ring_buffer_free(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
 
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
                           u64 (*clock)(void))
 {
        buffer->clock = clock;
 }
 
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
 {
        buffer->time_stamp_abs = abs;
 }
 
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
 {
        return buffer->time_stamp_abs;
 }
@@ -1712,7 +1711,7 @@ static void update_pages_handler(struct work_struct *work)
  *
  * Returns 0 on success and < 0 on failure.
  */
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                        int cpu_id)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
@@ -1891,7 +1890,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
 {
        mutex_lock(&buffer->mutex);
        if (val)
@@ -2206,7 +2205,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 {
        struct buffer_page *tail_page = info->tail_page;
        struct buffer_page *commit_page = cpu_buffer->commit_page;
-       struct ring_buffer *buffer = cpu_buffer->buffer;
+       struct trace_buffer *buffer = cpu_buffer->buffer;
        struct buffer_page *next_page;
        int ret;
 
@@ -2330,11 +2329,11 @@ static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
 
 /**
  * rb_update_event - update event type and data
+ * @cpu_buffer: The per cpu buffer of the @event
  * @event: the event to update
- * @type: the type of event
- * @length: the size of the event field in the ring buffer
+ * @info: The info to update the @event with (contains length and delta)
  *
- * Update the type and data fields of the event. The length
+ * Update the type and data fields of the @event. The length
  * is the actual size that is written to the ring buffer,
  * and with this, we can determine what to place into the
  * data field.
@@ -2609,7 +2608,7 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static __always_inline void
-rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
        size_t nr_pages;
        size_t dirty;
@@ -2733,7 +2732,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
  * Call this function before calling another ring_buffer_lock_reserve() and
  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
  */
-void ring_buffer_nest_start(struct ring_buffer *buffer)
+void ring_buffer_nest_start(struct trace_buffer *buffer)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu;
@@ -2753,7 +2752,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
  * Must be called after ring_buffer_nest_start() and after the
  * ring_buffer_unlock_commit().
  */
-void ring_buffer_nest_end(struct ring_buffer *buffer)
+void ring_buffer_nest_end(struct trace_buffer *buffer)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu;
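
The comments above describe the nesting protocol in prose; a compact sketch of a caller that follows it (the helper name and payload are made up, the ring-buffer calls are the ones whose prototypes change in this file):

#include <linux/ring_buffer.h>
#include <linux/string.h>

/* Hypothetical helper: write one message into @buffer from a context that
 * may already be inside another reserve/commit pair on this CPU.
 */
static void example_nested_write(struct trace_buffer *buffer,
				 const char *msg, unsigned long len)
{
	struct ring_buffer_event *event;

	ring_buffer_nest_start(buffer);			/* allow one extra nesting level */

	event = ring_buffer_lock_reserve(buffer, len);
	if (event) {
		memcpy(ring_buffer_event_data(event), msg, len);
		ring_buffer_unlock_commit(buffer, event);
	}

	ring_buffer_nest_end(buffer);			/* must pair with nest_start() */
}
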
@@ -2775,7 +2774,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
  *
  * Must be paired with ring_buffer_lock_reserve.
  */
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
                              struct ring_buffer_event *event)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
@@ -2868,7 +2867,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static __always_inline struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer *buffer,
+rb_reserve_next_event(struct trace_buffer *buffer,
                      struct ring_buffer_per_cpu *cpu_buffer,
                      unsigned long length)
 {
@@ -2961,7 +2960,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
  * If NULL is returned, then nothing has been allocated or locked.
  */
 struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
+ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
@@ -3062,7 +3061,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
  * If this function is called, do not call ring_buffer_unlock_commit on
  * the event.
  */
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
                                struct ring_buffer_event *event)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
@@ -3113,7 +3112,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
  * Note, like ring_buffer_lock_reserve, the length is the length of the data
  * and not the length of the event which would hold the header.
  */
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
                      unsigned long length,
                      void *data)
 {
@@ -3193,7 +3192,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  *
  * The caller should call synchronize_rcu() after this.
  */
-void ring_buffer_record_disable(struct ring_buffer *buffer)
+void ring_buffer_record_disable(struct trace_buffer *buffer)
 {
        atomic_inc(&buffer->record_disabled);
 }
@@ -3206,7 +3205,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * Note, multiple disables will need the same number of enables
  * to truly enable the writing (much like preempt_disable).
  */
-void ring_buffer_record_enable(struct ring_buffer *buffer)
+void ring_buffer_record_enable(struct trace_buffer *buffer)
 {
        atomic_dec(&buffer->record_disabled);
 }
@@ -3223,7 +3222,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
  * it works like an on/off switch, where as the disable() version
  * must be paired with a enable().
  */
-void ring_buffer_record_off(struct ring_buffer *buffer)
+void ring_buffer_record_off(struct trace_buffer *buffer)
 {
        unsigned int rd;
        unsigned int new_rd;
@@ -3246,7 +3245,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off);
  * it works like an on/off switch, where as the enable() version
  * must be paired with a disable().
  */
-void ring_buffer_record_on(struct ring_buffer *buffer)
+void ring_buffer_record_on(struct trace_buffer *buffer)
 {
        unsigned int rd;
        unsigned int new_rd;
@@ -3264,7 +3263,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_on);
  *
  * Returns true if the ring buffer is in a state that it accepts writes.
  */
-bool ring_buffer_record_is_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_on(struct trace_buffer *buffer)
 {
        return !atomic_read(&buffer->record_disabled);
 }
@@ -3280,7 +3279,7 @@ bool ring_buffer_record_is_on(struct ring_buffer *buffer)
  * ring_buffer_record_disable(), as that is a temporary disabling of
  * the ring buffer.
  */
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
 {
        return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
 }
@@ -3295,7 +3294,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
  *
  * The caller should call synchronize_rcu() after this.
  */
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3315,7 +3314,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * Note, multiple disables will need the same number of enables
  * to truly enable the writing (much like preempt_disable).
  */
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3345,7 +3344,7 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
 {
        unsigned long flags;
        struct ring_buffer_per_cpu *cpu_buffer;
@@ -3378,7 +3377,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long ret;
@@ -3398,7 +3397,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the entries from.
  */
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3417,7 +3416,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long ret;
@@ -3440,7 +3439,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
  * @cpu: The per CPU buffer to get the number of overruns from
  */
 unsigned long
-ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long ret;
@@ -3462,7 +3461,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
  * @cpu: The per CPU buffer to get the number of overruns from
  */
 unsigned long
-ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long ret;
@@ -3483,7 +3482,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
  * @cpu: The per CPU buffer to get the number of events read
  */
 unsigned long
-ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3502,7 +3501,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
  * Returns the total number of entries in the ring buffer
  * (all CPU entries)
  */
-unsigned long ring_buffer_entries(struct ring_buffer *buffer)
+unsigned long ring_buffer_entries(struct trace_buffer *buffer)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long entries = 0;
@@ -3525,7 +3524,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries);
  * Returns the total number of overruns in the ring buffer
  * (all CPU entries)
  */
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long overruns = 0;
@@ -3949,7 +3948,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_peek);
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        int nr_loops = 0;
@@ -4077,7 +4076,7 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
  * not consume the data.
  */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
                 unsigned long *lost_events)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4141,7 +4140,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  * and eventually empty the ring buffer if the producer is slower.
  */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
                    unsigned long *lost_events)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
@@ -4201,7 +4200,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_iter *iter;
@@ -4331,8 +4330,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_read);
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
  * @buffer: The ring buffer.
+ * @cpu: The CPU to get ring buffer size from.
  */
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 {
        /*
         * Earlier, this method returned
@@ -4398,7 +4398,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
  * @buffer: The ring buffer to reset a per cpu buffer of
  * @cpu: The CPU buffer to be reset
  */
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        unsigned long flags;
@@ -4435,7 +4435,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
  * ring_buffer_reset - reset a ring buffer
  * @buffer: The ring buffer to reset all cpu buffers
  */
-void ring_buffer_reset(struct ring_buffer *buffer)
+void ring_buffer_reset(struct trace_buffer *buffer)
 {
        int cpu;
 
@@ -4448,7 +4448,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
  * ring_buffer_empty - is the ring buffer empty?
  * @buffer: The ring buffer to test
  */
-bool ring_buffer_empty(struct ring_buffer *buffer)
+bool ring_buffer_empty(struct trace_buffer *buffer)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
@@ -4478,7 +4478,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
  * @buffer: The ring buffer
  * @cpu: The CPU buffer to test
  */
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
@@ -4504,14 +4504,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
  * @buffer_a: One buffer to swap with
  * @buffer_b: The other buffer to swap with
+ * @cpu: the CPU of the buffers to swap
  *
  * This function is useful for tracers that want to take a "snapshot"
  * of a CPU buffer and has another back up buffer lying around.
  * it is expected that the tracer handles the cpu buffer not being
  * used at the moment.
  */
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
-                        struct ring_buffer *buffer_b, int cpu)
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+                        struct trace_buffer *buffer_b, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer_a;
        struct ring_buffer_per_cpu *cpu_buffer_b;
@@ -4590,7 +4591,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or ERR_PTR
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_data_page *bpage = NULL;
@@ -4637,7 +4638,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
  *
  * Free a page allocated from ring_buffer_alloc_read_page.
  */
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct buffer_data_page *bpage = data;
@@ -4697,7 +4698,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  *  >=0 if data has been transferred, returns the offset of consumed data.
  *  <0 if no data has been transferred.
  */
-int ring_buffer_read_page(struct ring_buffer *buffer,
+int ring_buffer_read_page(struct trace_buffer *buffer,
                          void **data_page, size_t len, int cpu, int full)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4868,12 +4869,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_page);
  */
 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
 {
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        long nr_pages_same;
        int cpu_i;
        unsigned long nr_pages;
 
-       buffer = container_of(node, struct ring_buffer, node);
+       buffer = container_of(node, struct trace_buffer, node);
        if (cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;
 
@@ -4923,7 +4924,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
 static struct task_struct *rb_threads[NR_CPUS] __initdata;
 
 struct rb_test_data {
-       struct ring_buffer      *buffer;
+       struct trace_buffer *buffer;
        unsigned long           events;
        unsigned long           bytes_written;
        unsigned long           bytes_alloc;
@@ -5065,7 +5066,7 @@ static __init int rb_hammer_test(void *arg)
 static __init int test_ringbuffer(void)
 {
        struct task_struct *rb_hammer;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        int cpu;
        int ret = 0;
 
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 32149e4..8df0aa8 100644 (file)
@@ -29,7 +29,7 @@ static int reader_finish;
 static DECLARE_COMPLETION(read_start);
 static DECLARE_COMPLETION(read_done);
 
-static struct ring_buffer *buffer;
+static struct trace_buffer *buffer;
 static struct task_struct *producer;
 static struct task_struct *consumer;
 static unsigned long read;
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
new file mode 100644 (file)
index 0000000..4aefe00
--- /dev/null
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module for in-kernel synthetic event creation and generation.
+ *
+ * Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
+ */
+
+#include <linux/module.h>
+#include <linux/trace_events.h>
+
+/*
+ * This module is a simple test of basic functionality for in-kernel
+ * synthetic event creation and generation, the first and second tests
+ * using synth_event_gen_cmd_start() and synth_event_add_field(), the
+ * third uses synth_event_create() to do it all at once with a static
+ * field array.
+ *
+ * Following that are a few examples using the created events to test
+ * various ways of tracing a synthetic event.
+ *
+ * To test, select CONFIG_SYNTH_EVENT_GEN_TEST and build the module.
+ * Then:
+ *
+ * # insmod kernel/trace/synth_event_gen_test.ko
+ * # cat /sys/kernel/debug/tracing/trace
+ *
+ * You should see several events in the trace buffer -
+ * "create_synth_test", "empty_synth_test", and several instances of
+ * "gen_synth_test".
+ *
+ * To remove the events, remove the module:
+ *
+ * # rmmod synth_event_gen_test
+ *
+ */
+
+static struct trace_event_file *create_synth_test;
+static struct trace_event_file *empty_synth_test;
+static struct trace_event_file *gen_synth_test;
+
+/*
+ * Test to make sure we can create a synthetic event, then add more
+ * fields.
+ */
+static int __init test_gen_synth_cmd(void)
+{
+       struct dynevent_cmd cmd;
+       u64 vals[7];
+       char *buf;
+       int ret;
+
+       /* Create a buffer to hold the generated command */
+       buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* Before generating the command, initialize the cmd object */
+       synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+       /*
+        * Create the empty gen_synth_test synthetic event with the
+        * first 4 fields.
+        */
+       ret = synth_event_gen_cmd_start(&cmd, "gen_synth_test", THIS_MODULE,
+                                       "pid_t", "next_pid_field",
+                                       "char[16]", "next_comm_field",
+                                       "u64", "ts_ns",
+                                       "u64", "ts_ms");
+       if (ret)
+               goto free;
+
+       /* Use synth_event_add_field to add the rest of the fields */
+
+       ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "int", "my_int_field");
+       if (ret)
+               goto free;
+
+       ret = synth_event_gen_cmd_end(&cmd);
+       if (ret)
+               goto free;
+
+       /*
+        * Now get the gen_synth_test event file.  We need to prevent
+        * the instance and event from disappearing from underneath
+        * us, which trace_get_event_file() does (though in this case
+        * we're using the top-level instance which never goes away).
+        */
+       gen_synth_test = trace_get_event_file(NULL, "synthetic",
+                                             "gen_synth_test");
+       if (IS_ERR(gen_synth_test)) {
+               ret = PTR_ERR(gen_synth_test);
+               goto delete;
+       }
+
+       /* Enable the event or you won't see anything */
+       ret = trace_array_set_clr_event(gen_synth_test->tr,
+                                       "synthetic", "gen_synth_test", true);
+       if (ret) {
+               trace_put_event_file(gen_synth_test);
+               goto delete;
+       }
+
+       /* Create some bogus values just for testing */
+
+       vals[0] = 777;                  /* next_pid_field */
+       vals[1] = (u64)"hula hoops";    /* next_comm_field */
+       vals[2] = 1000000;              /* ts_ns */
+       vals[3] = 1000;                 /* ts_ms */
+       vals[4] = smp_processor_id();   /* cpu */
+       vals[5] = (u64)"thneed";        /* my_string_field */
+       vals[6] = 598;                  /* my_int_field */
+
+       /* Now generate a gen_synth_test event */
+       ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+       return ret;
+ delete:
+       /* We got an error after creating the event, delete it */
+       synth_event_delete("gen_synth_test");
+ free:
+       kfree(buf);
+
+       goto out;
+}
+
+/*
+ * Test to make sure we can create an initially empty synthetic event,
+ * then add all the fields.
+ */
+static int __init test_empty_synth_event(void)
+{
+       struct dynevent_cmd cmd;
+       u64 vals[7];
+       char *buf;
+       int ret;
+
+       /* Create a buffer to hold the generated command */
+       buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* Before generating the command, initialize the cmd object */
+       synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+       /*
+        * Create the empty_synth_test synthetic event with no fields.
+        */
+       ret = synth_event_gen_cmd_start(&cmd, "empty_synth_test", THIS_MODULE);
+       if (ret)
+               goto free;
+
+       /* Use synth_event_add_field to add all of the fields */
+
+       ret = synth_event_add_field(&cmd, "pid_t", "next_pid_field");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "char[16]", "next_comm_field");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "u64", "ts_ns");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "u64", "ts_ms");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
+       if (ret)
+               goto free;
+
+       ret = synth_event_add_field(&cmd, "int", "my_int_field");
+       if (ret)
+               goto free;
+
+       /* All fields have been added, close and register the synth event */
+
+       ret = synth_event_gen_cmd_end(&cmd);
+       if (ret)
+               goto free;
+
+       /*
+        * Now get the empty_synth_test event file.  We need to
+        * prevent the instance and event from disappearing from
+        * underneath us, which trace_get_event_file() does (though in
+        * this case we're using the top-level instance which never
+        * goes away).
+        */
+       empty_synth_test = trace_get_event_file(NULL, "synthetic",
+                                               "empty_synth_test");
+       if (IS_ERR(empty_synth_test)) {
+               ret = PTR_ERR(empty_synth_test);
+               goto delete;
+       }
+
+       /* Enable the event or you won't see anything */
+       ret = trace_array_set_clr_event(empty_synth_test->tr,
+                                       "synthetic", "empty_synth_test", true);
+       if (ret) {
+               trace_put_event_file(empty_synth_test);
+               goto delete;
+       }
+
+       /* Create some bogus values just for testing */
+
+       vals[0] = 777;                  /* next_pid_field */
+       vals[1] = (u64)"tiddlywinks";   /* next_comm_field */
+       vals[2] = 1000000;              /* ts_ns */
+       vals[3] = 1000;                 /* ts_ms */
+       vals[4] = smp_processor_id();   /* cpu */
+       vals[5] = (u64)"thneed_2.0";    /* my_string_field */
+       vals[6] = 399;                  /* my_int_field */
+
+       /* Now trace an empty_synth_test event */
+       ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+       return ret;
+ delete:
+       /* We got an error after creating the event, delete it */
+       synth_event_delete("empty_synth_test");
+ free:
+       kfree(buf);
+
+       goto out;
+}
+
+static struct synth_field_desc create_synth_test_fields[] = {
+       { .type = "pid_t",              .name = "next_pid_field" },
+       { .type = "char[16]",           .name = "next_comm_field" },
+       { .type = "u64",                .name = "ts_ns" },
+       { .type = "u64",                .name = "ts_ms" },
+       { .type = "unsigned int",       .name = "cpu" },
+       { .type = "char[64]",           .name = "my_string_field" },
+       { .type = "int",                .name = "my_int_field" },
+};
+
+/*
+ * Test synthetic event creation all at once from array of field
+ * descriptors.
+ */
+static int __init test_create_synth_event(void)
+{
+       u64 vals[7];
+       int ret;
+
+       /* Create the create_synth_test event with the fields above */
+       ret = synth_event_create("create_synth_test",
+                                create_synth_test_fields,
+                                ARRAY_SIZE(create_synth_test_fields),
+                                THIS_MODULE);
+       if (ret)
+               goto out;
+
+       /*
+        * Now get the create_synth_test event file.  We need to
+        * prevent the instance and event from disappearing from
+        * underneath us, which trace_get_event_file() does (though in
+        * this case we're using the top-level instance which never
+        * goes away).
+        */
+       create_synth_test = trace_get_event_file(NULL, "synthetic",
+                                                "create_synth_test");
+       if (IS_ERR(create_synth_test)) {
+               ret = PTR_ERR(create_synth_test);
+               goto delete;
+       }
+
+       /* Enable the event or you won't see anything */
+       ret = trace_array_set_clr_event(create_synth_test->tr,
+                                       "synthetic", "create_synth_test", true);
+       if (ret) {
+               trace_put_event_file(create_synth_test);
+               goto delete;
+       }
+
+       /* Create some bogus values just for testing */
+
+       vals[0] = 777;                  /* next_pid_field */
+       vals[1] = (u64)"tiddlywinks";   /* next_comm_field */
+       vals[2] = 1000000;              /* ts_ns */
+       vals[3] = 1000;                 /* ts_ms */
+       vals[4] = smp_processor_id();   /* cpu */
+       vals[5] = (u64)"thneed";        /* my_string_field */
+       vals[6] = 398;                  /* my_int_field */
+
+       /* Now generate a create_synth_test event */
+       ret = synth_event_trace_array(create_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+       return ret;
+ delete:
+       /* We got an error after creating the event, delete it */
+       ret = synth_event_delete("create_synth_test");
+
+       goto out;
+}
+
+/*
+ * Test tracing a synthetic event by reserving trace buffer space,
+ * then filling in fields one after another.
+ */
+static int __init test_add_next_synth_val(void)
+{
+       struct synth_event_trace_state trace_state;
+       int ret;
+
+       /* Start by reserving space in the trace buffer */
+       ret = synth_event_trace_start(gen_synth_test, &trace_state);
+       if (ret)
+               return ret;
+
+       /* Write some bogus values into the trace buffer, one after another */
+
+       /* next_pid_field */
+       ret = synth_event_add_next_val(777, &trace_state);
+       if (ret)
+               goto out;
+
+       /* next_comm_field */
+       ret = synth_event_add_next_val((u64)"slinky", &trace_state);
+       if (ret)
+               goto out;
+
+       /* ts_ns */
+       ret = synth_event_add_next_val(1000000, &trace_state);
+       if (ret)
+               goto out;
+
+       /* ts_ms */
+       ret = synth_event_add_next_val(1000, &trace_state);
+       if (ret)
+               goto out;
+
+       /* cpu */
+       ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
+       if (ret)
+               goto out;
+
+       /* my_string_field */
+       ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
+       if (ret)
+               goto out;
+
+       /* my_int_field */
+       ret = synth_event_add_next_val(395, &trace_state);
+ out:
+       /* Finally, commit the event */
+       ret = synth_event_trace_end(&trace_state);
+
+       return ret;
+}
+
+/*
+ * Test tracing a synthetic event by reserving trace buffer space,
+ * then filling in fields using field names, which can be done in any
+ * order.
+ */
+static int __init test_add_synth_val(void)
+{
+       struct synth_event_trace_state trace_state;
+       int ret;
+
+       /* Start by reserving space in the trace buffer */
+       ret = synth_event_trace_start(gen_synth_test, &trace_state);
+       if (ret)
+               return ret;
+
+       /* Write some bogus values into the trace buffer, using field names */
+
+       ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
+       if (ret)
+               goto out;
+
+       ret = synth_event_add_val("ts_ms", 1000, &trace_state);
+       if (ret)
+               goto out;
+
+       ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
+       if (ret)
+               goto out;
+
+       ret = synth_event_add_val("next_pid_field", 777, &trace_state);
+       if (ret)
+               goto out;
+
+       ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
+                                 &trace_state);
+       if (ret)
+               goto out;
+
+       ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
+                                 &trace_state);
+       if (ret)
+               goto out;
+
+       ret = synth_event_add_val("my_int_field", 3999, &trace_state);
+ out:
+       /* Finally, commit the event */
+       ret = synth_event_trace_end(&trace_state);
+
+       return ret;
+}
+
+/*
+ * Test tracing a synthetic event all at once from array of values.
+ */
+static int __init test_trace_synth_event(void)
+{
+       int ret;
+
+       /* Trace some bogus values just for testing */
+       ret = synth_event_trace(create_synth_test, 7,   /* number of values */
+                               444,                    /* next_pid_field */
+                               (u64)"clackers",        /* next_comm_field */
+                               1000000,                /* ts_ns */
+                               1000,                   /* ts_ms */
+                               smp_processor_id(),     /* cpu */
+                               (u64)"Thneed",          /* my_string_field */
+                               999);                   /* my_int_field */
+       return ret;
+}
+
+static int __init synth_event_gen_test_init(void)
+{
+       int ret;
+
+       ret = test_gen_synth_cmd();
+       if (ret)
+               return ret;
+
+       ret = test_empty_synth_event();
+       if (ret) {
+               WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+                                                 "synthetic",
+                                                 "gen_synth_test", false));
+               trace_put_event_file(gen_synth_test);
+               WARN_ON(synth_event_delete("gen_synth_test"));
+               goto out;
+       }
+
+       ret = test_create_synth_event();
+       if (ret) {
+               WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+                                                 "synthetic",
+                                                 "gen_synth_test", false));
+               trace_put_event_file(gen_synth_test);
+               WARN_ON(synth_event_delete("gen_synth_test"));
+
+               WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
+                                                 "synthetic",
+                                                 "empty_synth_test", false));
+               trace_put_event_file(empty_synth_test);
+               WARN_ON(synth_event_delete("empty_synth_test"));
+               goto out;
+       }
+
+       ret = test_add_next_synth_val();
+       WARN_ON(ret);
+
+       ret = test_add_synth_val();
+       WARN_ON(ret);
+
+       ret = test_trace_synth_event();
+       WARN_ON(ret);
+ out:
+       return ret;
+}
+
+static void __exit synth_event_gen_test_exit(void)
+{
+       /* Disable the event or you can't remove it */
+       WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+                                         "synthetic",
+                                         "gen_synth_test", false));
+
+       /* Now give the file and instance back */
+       trace_put_event_file(gen_synth_test);
+
+       /* Now unregister and free the synthetic event */
+       WARN_ON(synth_event_delete("gen_synth_test"));
+
+       /* Disable the event or you can't remove it */
+       WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
+                                         "synthetic",
+                                         "empty_synth_test", false));
+
+       /* Now give the file and instance back */
+       trace_put_event_file(empty_synth_test);
+
+       /* Now unregister and free the synthetic event */
+       WARN_ON(synth_event_delete("empty_synth_test"));
+
+       /* Disable the event or you can't remove it */
+       WARN_ON(trace_array_set_clr_event(create_synth_test->tr,
+                                         "synthetic",
+                                         "create_synth_test", false));
+
+       /* Now give the file and instance back */
+       trace_put_event_file(create_synth_test);
+
+       /* Now unregister and free the synthetic event */
+       WARN_ON(synth_event_delete("create_synth_test"));
+}
+
+module_init(synth_event_gen_test_init)
+module_exit(synth_event_gen_test_exit)
+
+MODULE_AUTHOR("Tom Zanussi");
+MODULE_DESCRIPTION("synthetic event generation test");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5b6ee4a..c797a15 100644 (file)
@@ -162,8 +162,8 @@ union trace_eval_map_item {
 static union trace_eval_map_item *trace_eval_maps;
 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 
-static int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+int tracing_set_tracer(struct trace_array *tr, const char *buf);
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
                                   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE                100
@@ -338,7 +338,7 @@ int tracing_check_open_get_tr(struct trace_array *tr)
 }
 
 int call_filter_check_discard(struct trace_event_call *call, void *rec,
-                             struct ring_buffer *buffer,
+                             struct trace_buffer *buffer,
                              struct ring_buffer_event *event)
 {
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
@@ -603,7 +603,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
        return read;
 }
 
-static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
 {
        u64 ts;
 
@@ -619,7 +619,7 @@ static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 
 u64 ftrace_now(int cpu)
 {
-       return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+       return buffer_ftrace_now(&global_trace.array_buffer, cpu);
 }
 
 /**
@@ -747,22 +747,22 @@ static inline void trace_access_lock_init(void)
 #endif
 
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
-                                     struct ring_buffer *buffer,
+                                     struct trace_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs);
 
 #else
-static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
+static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
                                        unsigned long flags,
                                        int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
-                                     struct ring_buffer *buffer,
+                                     struct trace_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs)
 {
@@ -780,7 +780,7 @@ trace_event_setup(struct ring_buffer_event *event,
 }
 
 static __always_inline struct ring_buffer_event *
-__trace_buffer_lock_reserve(struct ring_buffer *buffer,
+__trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
@@ -796,8 +796,8 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
 
 void tracer_tracing_on(struct trace_array *tr)
 {
-       if (tr->trace_buffer.buffer)
-               ring_buffer_record_on(tr->trace_buffer.buffer);
+       if (tr->array_buffer.buffer)
+               ring_buffer_record_on(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
@@ -825,7 +825,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
 
 
 static __always_inline void
-__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
 {
        __this_cpu_write(trace_taskinfo_save, true);
 
@@ -848,7 +848,7 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
 int __trace_puts(unsigned long ip, const char *str, int size)
 {
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
@@ -865,11 +865,14 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
        local_save_flags(irq_flags);
-       buffer = global_trace.trace_buffer.buffer;
+       buffer = global_trace.array_buffer.buffer;
+       ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
                                            irq_flags, pc);
-       if (!event)
-               return 0;
+       if (!event) {
+               size = 0;
+               goto out;
+       }
 
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
@@ -885,7 +888,8 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
-
+ out:
+       ring_buffer_nest_end(buffer);
        return size;
 }
 EXPORT_SYMBOL_GPL(__trace_puts);
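
__trace_puts() above now brackets the reserve/commit with ring_buffer_nest_start()/ring_buffer_nest_end() and routes the reservation failure through a single out: label so the nesting is undone on every path. A condensed restatement of that pattern, using the same file-local helpers, with the '\n' handling of the original left out (sketch only):

static int example_nested_print(struct trace_buffer *buffer, unsigned long ip,
                                const char *str, int size,
                                unsigned long irq_flags, int pc)
{
        struct ring_buffer_event *event;
        struct print_entry *entry;
        int alloc = sizeof(*entry) + size + 1;  /* room for a trailing NUL */

        ring_buffer_nest_start(buffer);         /* permit a nested write on this CPU */
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            irq_flags, pc);
        if (!event) {
                size = 0;                       /* nothing was written */
                goto out;
        }

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        memcpy(&entry->buf, str, size);
        entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
 out:
        ring_buffer_nest_end(buffer);           /* always pairs with nest_start */
        return size;
}
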
@@ -898,10 +902,11 @@ EXPORT_SYMBOL_GPL(__trace_puts);
 int __trace_bputs(unsigned long ip, const char *str)
 {
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
+       int ret = 0;
        int pc;
 
        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
@@ -913,11 +918,13 @@ int __trace_bputs(unsigned long ip, const char *str)
                return 0;
 
        local_save_flags(irq_flags);
-       buffer = global_trace.trace_buffer.buffer;
+       buffer = global_trace.array_buffer.buffer;
+
+       ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            irq_flags, pc);
        if (!event)
-               return 0;
+               goto out;
 
        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
@@ -926,7 +933,10 @@ int __trace_bputs(unsigned long ip, const char *str)
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 
-       return 1;
+       ret = 1;
+ out:
+       ring_buffer_nest_end(buffer);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
@@ -1036,9 +1046,9 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
 }
 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
-                                       struct trace_buffer *size_buf, int cpu_id);
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+                                       struct array_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
 
 int tracing_alloc_snapshot_instance(struct trace_array *tr)
 {
@@ -1048,7 +1058,7 @@ int tracing_alloc_snapshot_instance(struct trace_array *tr)
 
                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
-                                  &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+                                  &tr->array_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;
 
@@ -1251,8 +1261,8 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 
 void tracer_tracing_off(struct trace_array *tr)
 {
-       if (tr->trace_buffer.buffer)
-               ring_buffer_record_off(tr->trace_buffer.buffer);
+       if (tr->array_buffer.buffer)
+               ring_buffer_record_off(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
@@ -1294,8 +1304,8 @@ void disable_trace_on_warning(void)
  */
 bool tracer_tracing_is_on(struct trace_array *tr)
 {
-       if (tr->trace_buffer.buffer)
-               return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+       if (tr->array_buffer.buffer)
+               return ring_buffer_record_is_on(tr->array_buffer.buffer);
        return !tr->buffer_disabled;
 }
 
@@ -1590,8 +1600,8 @@ void latency_fsnotify(struct trace_array *tr)
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct trace_buffer *trace_buf = &tr->trace_buffer;
-       struct trace_buffer *max_buf = &tr->max_buffer;
+       struct array_buffer *trace_buf = &tr->array_buffer;
+       struct array_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 
@@ -1649,8 +1659,8 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 
        arch_spin_lock(&tr->max_lock);
 
-       /* Inherit the recordable setting from trace_buffer */
-       if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+       /* Inherit the recordable setting from array_buffer */
+       if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
                ring_buffer_record_on(tr->max_buffer.buffer);
        else
                ring_buffer_record_off(tr->max_buffer.buffer);
@@ -1659,7 +1669,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
        if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
                goto out_unlock;
 #endif
-       swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
+       swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
 
        __update_max_tr(tr, tsk, cpu);
 
@@ -1692,7 +1702,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
-       ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
+       ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
 
        if (ret == -EBUSY) {
                /*
@@ -1718,7 +1728,7 @@ static int wait_on_pipe(struct trace_iterator *iter, int full)
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;
 
-       return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+       return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
                                full);
 }
 
@@ -1769,7 +1779,7 @@ static int run_tracer_selftest(struct tracer *type)
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
 
        tr->current_trace = type;
 
@@ -1795,7 +1805,7 @@ static int run_tracer_selftest(struct tracer *type)
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
@@ -1962,9 +1972,9 @@ int __init register_tracer(struct tracer *type)
        return ret;
 }
 
-static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
+static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 {
-       struct ring_buffer *buffer = buf->buffer;
+       struct trace_buffer *buffer = buf->buffer;
 
        if (!buffer)
                return;
@@ -1978,9 +1988,9 @@ static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
        ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_online_cpus(struct trace_buffer *buf)
+void tracing_reset_online_cpus(struct array_buffer *buf)
 {
-       struct ring_buffer *buffer = buf->buffer;
+       struct trace_buffer *buffer = buf->buffer;
        int cpu;
 
        if (!buffer)
@@ -2008,7 +2018,7 @@ void tracing_reset_all_online_cpus(void)
                if (!tr->clear_trace)
                        continue;
                tr->clear_trace = false;
-               tracing_reset_online_cpus(&tr->trace_buffer);
+               tracing_reset_online_cpus(&tr->array_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
 #endif
@@ -2098,7 +2108,7 @@ int is_tracing_stopped(void)
  */
 void tracing_start(void)
 {
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        unsigned long flags;
 
        if (tracing_disabled)
@@ -2117,7 +2127,7 @@ void tracing_start(void)
        /* Prevent the buffers from switching */
        arch_spin_lock(&global_trace.max_lock);
 
-       buffer = global_trace.trace_buffer.buffer;
+       buffer = global_trace.array_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 
@@ -2135,7 +2145,7 @@ void tracing_start(void)
 
 static void tracing_start_tr(struct trace_array *tr)
 {
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        unsigned long flags;
 
        if (tracing_disabled)
@@ -2156,7 +2166,7 @@ static void tracing_start_tr(struct trace_array *tr)
                goto out;
        }
 
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 
@@ -2172,7 +2182,7 @@ static void tracing_start_tr(struct trace_array *tr)
  */
 void tracing_stop(void)
 {
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
@@ -2182,7 +2192,7 @@ void tracing_stop(void)
        /* Prevent the buffers from switching */
        arch_spin_lock(&global_trace.max_lock);
 
-       buffer = global_trace.trace_buffer.buffer;
+       buffer = global_trace.array_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 
@@ -2200,7 +2210,7 @@ void tracing_stop(void)
 
 static void tracing_stop_tr(struct trace_array *tr)
 {
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        unsigned long flags;
 
        /* If global, we need to also stop the max tracer */
@@ -2211,7 +2221,7 @@ static void tracing_stop_tr(struct trace_array *tr)
        if (tr->stop_count++)
                goto out;
 
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 
@@ -2442,7 +2452,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
@@ -2561,10 +2571,10 @@ void trace_buffered_event_disable(void)
        preempt_enable();
 }
 
-static struct ring_buffer *temp_buffer;
+static struct trace_buffer *temp_buffer;
 
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
                          struct trace_event_file *trace_file,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
@@ -2572,7 +2582,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
        struct ring_buffer_event *entry;
        int val;
 
-       *current_rb = trace_file->tr->trace_buffer.buffer;
+       *current_rb = trace_file->tr->array_buffer.buffer;
 
        if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
             (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
@@ -2610,6 +2620,7 @@ static DEFINE_MUTEX(tracepoint_printk_mutex);
 static void output_printk(struct trace_event_buffer *fbuffer)
 {
        struct trace_event_call *event_call;
+       struct trace_event_file *file;
        struct trace_event *event;
        unsigned long flags;
        struct trace_iterator *iter = tracepoint_print_iter;
@@ -2623,6 +2634,12 @@ static void output_printk(struct trace_event_buffer *fbuffer)
            !event_call->event.funcs->trace)
                return;
 
+       file = fbuffer->trace_file;
+       if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+           (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+            !filter_match_preds(file->filter, fbuffer->entry)))
+               return;
+
        event = &fbuffer->trace_file->event_call->event;
 
        spin_lock_irqsave(&tracepoint_iter_lock, flags);
@@ -2673,9 +2690,9 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
        if (static_key_false(&tracepoint_printk_key.key))
                output_printk(fbuffer);
 
-       event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
+       event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
                                    fbuffer->event, fbuffer->entry,
-                                   fbuffer->flags, fbuffer->pc);
+                                   fbuffer->flags, fbuffer->pc, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
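
The output_printk() hunk above adds a guard so that soft-disabled or filtered events are not printed, and trace_event_buffer_commit() now forwards fbuffer->regs through event_trigger_unlock_commit_regs(). The added guard, restated as a predicate for illustration (the helper name is hypothetical; the flag and filter checks are the ones used above):

static bool example_event_file_wants_output(struct trace_event_file *file,
                                            void *entry)
{
        if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
                return false;                   /* event is soft-disabled */

        if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
            !filter_match_preds(file->filter, entry))
                return false;                   /* filtered out for this record */

        return true;                            /* safe to print */
}
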
 
@@ -2689,7 +2706,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 # define STACK_SKIP 3
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-                                    struct ring_buffer *buffer,
+                                    struct trace_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs)
@@ -2710,7 +2727,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
  */
 void
-trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
                                   struct ring_buffer_event *event)
 {
        __buffer_unlock_commit(buffer, event);
@@ -2845,7 +2862,7 @@ trace_function(struct trace_array *tr,
               int pc)
 {
        struct trace_event_call *call = &event_function;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
 
@@ -2883,7 +2900,7 @@ struct ftrace_stacks {
 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs)
 {
@@ -2958,7 +2975,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 }
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
-                                     struct ring_buffer *buffer,
+                                     struct trace_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs)
 {
@@ -2971,7 +2988,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                   int pc)
 {
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
 
        if (rcu_is_watching()) {
                __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
@@ -3009,7 +3026,7 @@ void trace_dump_stack(int skip)
        /* Skip 1 to skip this function. */
        skip++;
 #endif
-       __ftrace_trace_stack(global_trace.trace_buffer.buffer,
+       __ftrace_trace_stack(global_trace.array_buffer.buffer,
                             flags, skip, preempt_count(), NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
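
trace_dump_stack() above, like tracer_tracing_on()/tracer_tracing_off()/tracer_tracing_is_on() earlier in this file, now reaches the ring buffer through tr->array_buffer.buffer. Illustrative only, a caller inside the tracing code could quiesce one instance around such a dump with the helpers shown in this diff (the wrapper itself is hypothetical):

static void example_dump_stack_quiesced(struct trace_array *tr, int skip)
{
        bool was_on = tracer_tracing_is_on(tr);

        if (was_on)
                tracer_tracing_off(tr);         /* stop writes to tr->array_buffer */

        trace_dump_stack(skip);                 /* records into the global buffer */

        if (was_on)
                tracer_tracing_on(tr);          /* resume recording */
}
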
@@ -3018,7 +3035,7 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
 static DEFINE_PER_CPU(int, user_stack_count);
 
 static void
-ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
 {
        struct trace_event_call *call = &event_user_stack;
        struct ring_buffer_event *event;
@@ -3063,7 +3080,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        preempt_enable();
 }
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
                                   unsigned long flags, int pc)
 {
 }
@@ -3109,7 +3126,7 @@ static int alloc_percpu_trace_buffer(void)
        struct trace_buffer_struct *buffers;
 
        buffers = alloc_percpu(struct trace_buffer_struct);
-       if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
+       if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
                return -ENOMEM;
 
        trace_percpu_buffer = buffers;
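
Here, and in several later hunks, WARN()/WARN_ON()/WARN_ONCE() around allocation failures are replaced with MEM_FAIL(). Its definition is not part of this diff, but the call sites show it is used like WARN(): it takes a condition plus a message and evaluates truthy when the condition holds, so it can sit directly in an if(). A minimal sketch of the call pattern, with a hypothetical structure:

struct example_buf {
        int val;
};

static int example_alloc(struct example_buf **out)
{
        struct example_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (MEM_FAIL(!buf, "Could not allocate example buffer\n"))
                return -ENOMEM;                 /* failure was already reported */

        *out = buf;
        return 0;
}
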
@@ -3154,7 +3171,7 @@ void trace_printk_init_buffers(void)
         * directly here. If the global_trace.buffer is already
         * allocated here, then this was called by module code.
         */
-       if (global_trace.trace_buffer.buffer)
+       if (global_trace.array_buffer.buffer)
                tracing_start_cmdline_record();
 }
 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
@@ -3188,7 +3205,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
        struct trace_event_call *call = &event_bprint;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        struct trace_array *tr = &global_trace;
        struct bprint_entry *entry;
        unsigned long flags;
@@ -3213,11 +3230,12 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
        if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
-               goto out;
+               goto out_put;
 
        local_save_flags(flags);
        size = sizeof(*entry) + sizeof(u32) * len;
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
+       ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
                                            flags, pc);
        if (!event)
@@ -3233,6 +3251,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        }
 
 out:
+       ring_buffer_nest_end(buffer);
+out_put:
        put_trace_buf();
 
 out_nobuffer:
@@ -3245,7 +3265,7 @@ EXPORT_SYMBOL_GPL(trace_vbprintk);
 
 __printf(3, 0)
 static int
-__trace_array_vprintk(struct ring_buffer *buffer,
+__trace_array_vprintk(struct trace_buffer *buffer,
                      unsigned long ip, const char *fmt, va_list args)
 {
        struct trace_event_call *call = &event_print;
@@ -3275,6 +3295,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
        local_save_flags(flags);
        size = sizeof(*entry) + len + 1;
+       ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                            flags, pc);
        if (!event)
@@ -3289,6 +3310,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
        }
 
 out:
+       ring_buffer_nest_end(buffer);
        put_trace_buf();
 
 out_nobuffer:
@@ -3302,7 +3324,7 @@ __printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
-       return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
+       return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 }
 
 __printf(3, 0)
@@ -3326,7 +3348,7 @@ int trace_array_printk(struct trace_array *tr,
 EXPORT_SYMBOL_GPL(trace_array_printk);
 
 __printf(3, 4)
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
                           unsigned long ip, const char *fmt, ...)
 {
        int ret;
@@ -3367,7 +3389,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
        else
-               event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
+               event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
                                         lost_events);
 
        if (event) {
@@ -3382,7 +3404,7 @@ static struct trace_entry *
 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
                  unsigned long *missing_events, u64 *ent_ts)
 {
-       struct ring_buffer *buffer = iter->trace_buffer->buffer;
+       struct trace_buffer *buffer = iter->array_buffer->buffer;
        struct trace_entry *ent, *next = NULL;
        unsigned long lost_events = 0, next_lost = 0;
        int cpu_file = iter->cpu_file;
@@ -3459,7 +3481,7 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-       ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
+       ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
                            &iter->lost_events);
 }
 
@@ -3497,7 +3519,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
        unsigned long entries = 0;
        u64 ts;
 
-       per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
+       per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
 
        buf_iter = trace_buffer_iter(iter, cpu);
        if (!buf_iter)
@@ -3511,13 +3533,13 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
         * by the timestamp being before the start of the buffer.
         */
        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
-               if (ts >= iter->trace_buffer->time_start)
+               if (ts >= iter->array_buffer->time_start)
                        break;
                entries++;
                ring_buffer_read(buf_iter, NULL);
        }
 
-       per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
+       per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
 }
 
 /*
@@ -3602,7 +3624,7 @@ static void s_stop(struct seq_file *m, void *p)
 }
 
 static void
-get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
+get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
                      unsigned long *entries, int cpu)
 {
        unsigned long count;
@@ -3624,7 +3646,7 @@ get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
 }
 
 static void
-get_total_entries(struct trace_buffer *buf,
+get_total_entries(struct array_buffer *buf,
                  unsigned long *total, unsigned long *entries)
 {
        unsigned long t, e;
@@ -3647,7 +3669,7 @@ unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
        if (!tr)
                tr = &global_trace;
 
-       get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
+       get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
 
        return entries;
 }
@@ -3659,7 +3681,7 @@ unsigned long trace_total_entries(struct trace_array *tr)
        if (!tr)
                tr = &global_trace;
 
-       get_total_entries(&tr->trace_buffer, &total, &entries);
+       get_total_entries(&tr->array_buffer, &total, &entries);
 
        return entries;
 }
@@ -3676,7 +3698,7 @@ static void print_lat_help_header(struct seq_file *m)
                    "#     \\   /      |||||  \\    |   /         \n");
 }
 
-static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
+static void print_event_info(struct array_buffer *buf, struct seq_file *m)
 {
        unsigned long total;
        unsigned long entries;
@@ -3687,7 +3709,7 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
        seq_puts(m, "#\n");
 }
 
-static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
                                   unsigned int flags)
 {
        bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3698,7 +3720,7 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
        seq_printf(m, "#              | |     %s    |       |         |\n",      tgid ? "  |      " : "");
 }
 
-static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
                                       unsigned int flags)
 {
        bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3720,7 +3742,7 @@ void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
        unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
-       struct trace_buffer *buf = iter->trace_buffer;
+       struct array_buffer *buf = iter->array_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
        struct tracer *type = iter->trace;
        unsigned long entries;
@@ -3795,7 +3817,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
            cpumask_test_cpu(iter->cpu, iter->started))
                return;
 
-       if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
+       if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
                return;
 
        if (cpumask_available(iter->started))
@@ -3929,7 +3951,7 @@ int trace_empty(struct trace_iterator *iter)
                        if (!ring_buffer_iter_empty(buf_iter))
                                return 0;
                } else {
-                       if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+                       if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
                                return 0;
                }
                return 1;
@@ -3941,7 +3963,7 @@ int trace_empty(struct trace_iterator *iter)
                        if (!ring_buffer_iter_empty(buf_iter))
                                return 0;
                } else {
-                       if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+                       if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
                                return 0;
                }
        }
@@ -4031,10 +4053,10 @@ void trace_default_header(struct seq_file *m)
        } else {
                if (!(trace_flags & TRACE_ITER_VERBOSE)) {
                        if (trace_flags & TRACE_ITER_IRQ_INFO)
-                               print_func_help_header_irq(iter->trace_buffer,
+                               print_func_help_header_irq(iter->array_buffer,
                                                           m, trace_flags);
                        else
-                               print_func_help_header(iter->trace_buffer, m,
+                               print_func_help_header(iter->array_buffer, m,
                                                       trace_flags);
                }
        }
@@ -4192,21 +4214,21 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 #ifdef CONFIG_TRACER_MAX_TRACE
        /* Currently only the top directory has a snapshot */
        if (tr->current_trace->print_max || snapshot)
-               iter->trace_buffer = &tr->max_buffer;
+               iter->array_buffer = &tr->max_buffer;
        else
 #endif
-               iter->trace_buffer = &tr->trace_buffer;
+               iter->array_buffer = &tr->array_buffer;
        iter->snapshot = snapshot;
        iter->pos = -1;
        iter->cpu_file = tracing_get_cpu(inode);
        mutex_init(&iter->mutex);
 
        /* Notify the tracer early; before we stop tracing. */
-       if (iter->trace && iter->trace->open)
+       if (iter->trace->open)
                iter->trace->open(iter);
 
        /* Annotate start of buffers if we had overruns */
-       if (ring_buffer_overruns(iter->trace_buffer->buffer))
+       if (ring_buffer_overruns(iter->array_buffer->buffer))
                iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -4220,7 +4242,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        iter->buffer_iter[cpu] =
-                               ring_buffer_read_prepare(iter->trace_buffer->buffer,
+                               ring_buffer_read_prepare(iter->array_buffer->buffer,
                                                         cpu, GFP_KERNEL);
                }
                ring_buffer_read_prepare_sync();
@@ -4231,7 +4253,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        } else {
                cpu = iter->cpu_file;
                iter->buffer_iter[cpu] =
-                       ring_buffer_read_prepare(iter->trace_buffer->buffer,
+                       ring_buffer_read_prepare(iter->array_buffer->buffer,
                                                 cpu, GFP_KERNEL);
                ring_buffer_read_prepare_sync();
                ring_buffer_read_start(iter->buffer_iter[cpu]);
@@ -4357,7 +4379,7 @@ static int tracing_open(struct inode *inode, struct file *file)
        /* If this file was open for write, then erase contents */
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                int cpu = tracing_get_cpu(inode);
-               struct trace_buffer *trace_buf = &tr->trace_buffer;
+               struct array_buffer *trace_buf = &tr->array_buffer;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
                if (tr->current_trace->print_max)
@@ -4554,20 +4576,13 @@ out_err:
        return count;
 }
 
-static ssize_t
-tracing_cpumask_write(struct file *filp, const char __user *ubuf,
-                     size_t count, loff_t *ppos)
+int tracing_set_cpumask(struct trace_array *tr,
+                       cpumask_var_t tracing_cpumask_new)
 {
-       struct trace_array *tr = file_inode(filp)->i_private;
-       cpumask_var_t tracing_cpumask_new;
-       int err, cpu;
-
-       if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
-               return -ENOMEM;
+       int cpu;
 
-       err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
-       if (err)
-               goto err_unlock;
+       if (!tr)
+               return -EINVAL;
 
        local_irq_disable();
        arch_spin_lock(&tr->max_lock);
@@ -4578,24 +4593,47 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                 */
                if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-                       atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
-                       ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
+                       atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+                       ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
                }
                if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-                       atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
-                       ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
+                       atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+                       ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
                }
        }
        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();
 
        cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
+
+       return 0;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+                     size_t count, loff_t *ppos)
+{
+       struct trace_array *tr = file_inode(filp)->i_private;
+       cpumask_var_t tracing_cpumask_new;
+       int err;
+
+       if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+               return -ENOMEM;
+
+       err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+       if (err)
+               goto err_free;
+
+       err = tracing_set_cpumask(tr, tracing_cpumask_new);
+       if (err)
+               goto err_free;
+
        free_cpumask_var(tracing_cpumask_new);
 
        return count;
 
-err_unlock:
+err_free:
        free_cpumask_var(tracing_cpumask_new);
 
        return err;
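
The cpumask handling is split out of tracing_cpumask_write() into tracing_set_cpumask(), which takes an already-built cpumask and returns -EINVAL when no trace_array is given; the write handler is left with parsing and error handling only, presumably so other tracing code can reuse the setter. An illustrative in-kernel caller of the new helper (the function and its policy are hypothetical):

static int example_limit_tracing_to_cpu0(struct trace_array *tr)
{
        cpumask_var_t mask;
        int err;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_clear(mask);
        cpumask_set_cpu(0, mask);               /* trace only CPU 0 */

        err = tracing_set_cpumask(tr, mask);    /* enables/disables recording per CPU */

        free_cpumask_var(mask);
        return err;
}
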
@@ -4726,7 +4764,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
                ftrace_pid_follow_fork(tr, enabled);
 
        if (mask == TRACE_ITER_OVERWRITE) {
-               ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
+               ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
 #ifdef CONFIG_TRACER_MAX_TRACE
                ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
 #endif
@@ -4740,7 +4778,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
        return 0;
 }
 
-static int trace_set_options(struct trace_array *tr, char *option)
+int trace_set_options(struct trace_array *tr, char *option)
 {
        char *cmp;
        int neg = 0;
@@ -5361,14 +5399,12 @@ static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
         * Paranoid! If ptr points to end, we don't want to increment past it.
         * This really should never happen.
         */
+       (*pos)++;
        ptr = update_eval_map(ptr);
        if (WARN_ON_ONCE(!ptr))
                return NULL;
 
        ptr++;
-
-       (*pos)++;
-
        ptr = update_eval_map(ptr);
 
        return ptr;
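
Moving the (*pos)++ ahead of the map lookups means the position always advances, even when update_eval_map() ends the walk; the seq_file core expects ->next() to update *pos even on a NULL return, otherwise it can keep re-requesting the same position. A minimal, self-contained illustration of that contract (the item array is hypothetical):

static const char *const example_items[] = { "one", "two", "three" };

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;                               /* advance the position first ... */

        if (*pos >= (loff_t)ARRAY_SIZE(example_items))
                return NULL;                    /* ... even when iteration ends */

        return (void *)example_items[*pos];
}
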
@@ -5534,11 +5570,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 
 int tracer_init(struct tracer *t, struct trace_array *tr)
 {
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
        return t->init(tr);
 }
 
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
 {
        int cpu;
 
@@ -5548,8 +5584,8 @@ static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 /* resize @tr's buffer to the size of @size_tr's entries */
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
-                                       struct trace_buffer *size_buf, int cpu_id)
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+                                       struct array_buffer *size_buf, int cpu_id)
 {
        int cpu, ret = 0;
 
@@ -5587,10 +5623,10 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
        ring_buffer_expanded = true;
 
        /* May be called before buffers are initialized */
-       if (!tr->trace_buffer.buffer)
+       if (!tr->array_buffer.buffer)
                return 0;
 
-       ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
+       ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
        if (ret < 0)
                return ret;
 
@@ -5601,8 +5637,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 
        ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
        if (ret < 0) {
-               int r = resize_buffer_duplicate_size(&tr->trace_buffer,
-                                                    &tr->trace_buffer, cpu);
+               int r = resize_buffer_duplicate_size(&tr->array_buffer,
+                                                    &tr->array_buffer, cpu);
                if (r < 0) {
                        /*
                         * AARGH! We are left with different
@@ -5633,15 +5669,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
        if (cpu == RING_BUFFER_ALL_CPUS)
-               set_buffer_entries(&tr->trace_buffer, size);
+               set_buffer_entries(&tr->array_buffer, size);
        else
-               per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
+               per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
 
        return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
-                                         unsigned long size, int cpu_id)
+ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+                                 unsigned long size, int cpu_id)
 {
        int ret = size;
 
@@ -5720,7 +5756,7 @@ static void add_tracer_options(struct trace_array *tr, struct tracer *t)
        create_trace_option_files(tr, t);
 }
 
-static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+int tracing_set_tracer(struct trace_array *tr, const char *buf)
 {
        struct tracer *t;
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5979,7 +6015,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
        iter->tr = tr;
-       iter->trace_buffer = &tr->trace_buffer;
+       iter->array_buffer = &tr->array_buffer;
        iter->cpu_file = tracing_get_cpu(inode);
        mutex_init(&iter->mutex);
        filp->private_data = iter;
@@ -6039,7 +6075,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
                 */
                return EPOLLIN | EPOLLRDNORM;
        else
-               return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
+               return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
                                             filp, poll_table);
 }
 
@@ -6356,8 +6392,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
                for_each_tracing_cpu(cpu) {
                        /* fill in the size from first enabled cpu */
                        if (size == 0)
-                               size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
-                       if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
+                               size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
+                       if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
                                buf_size_same = 0;
                                break;
                        }
@@ -6373,7 +6409,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
                } else
                        r = sprintf(buf, "X\n");
        } else
-               r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
+               r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
 
        mutex_unlock(&trace_types_lock);
 
@@ -6420,7 +6456,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
        mutex_lock(&trace_types_lock);
        for_each_tracing_cpu(cpu) {
-               size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
+               size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
                if (!ring_buffer_expanded)
                        expanded_size += trace_buf_size >> 10;
        }
@@ -6470,7 +6506,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        struct trace_array *tr = filp->private_data;
        struct ring_buffer_event *event;
        enum event_trigger_type tt = ETT_NONE;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        ssize_t written;
@@ -6499,7 +6535,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        if (cnt < FAULTED_SIZE)
                size += FAULTED_SIZE - cnt;
 
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                            irq_flags, preempt_count());
        if (unlikely(!event))
@@ -6550,7 +6586,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 {
        struct trace_array *tr = filp->private_data;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        struct raw_data_entry *entry;
        unsigned long irq_flags;
        ssize_t written;
@@ -6579,7 +6615,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
        if (cnt < FAULT_SIZE_ID)
                size += FAULT_SIZE_ID - cnt;
 
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
                                            irq_flags, preempt_count());
        if (!event)
@@ -6634,13 +6670,13 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 
        tr->clock_id = i;
 
-       ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
+       ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
 
        /*
         * New clock may not be consistent with the previous clock.
         * Reset the buffer so that it doesn't have incomparable timestamps.
         */
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        if (tr->max_buffer.buffer)
@@ -6703,7 +6739,7 @@ static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
 
        mutex_lock(&trace_types_lock);
 
-       if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
+       if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
                seq_puts(m, "delta [absolute]\n");
        else
                seq_puts(m, "[delta] absolute\n");
@@ -6748,7 +6784,7 @@ int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
                        goto out;
        }
 
-       ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
+       ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        if (tr->max_buffer.buffer)
@@ -6797,7 +6833,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
                ret = 0;
 
                iter->tr = tr;
-               iter->trace_buffer = &tr->max_buffer;
+               iter->array_buffer = &tr->max_buffer;
                iter->cpu_file = tracing_get_cpu(inode);
                m->private = iter;
                file->private_data = m;
@@ -6860,7 +6896,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 #endif
                if (tr->allocated_snapshot)
                        ret = resize_buffer_duplicate_size(&tr->max_buffer,
-                                       &tr->trace_buffer, iter->cpu_file);
+                                       &tr->array_buffer, iter->cpu_file);
                else
                        ret = tracing_alloc_snapshot_instance(tr);
                if (ret < 0)
@@ -6935,7 +6971,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
        }
 
        info->iter.snapshot = true;
-       info->iter.trace_buffer = &info->iter.tr->max_buffer;
+       info->iter.array_buffer = &info->iter.tr->max_buffer;
 
        return ret;
 }
@@ -7310,7 +7346,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
        info->iter.tr           = tr;
        info->iter.cpu_file     = tracing_get_cpu(inode);
        info->iter.trace        = tr->current_trace;
-       info->iter.trace_buffer = &tr->trace_buffer;
+       info->iter.array_buffer = &tr->array_buffer;
        info->spare             = NULL;
        /* Force reading ring buffer for first read */
        info->read              = (unsigned int)-1;
@@ -7355,7 +7391,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 #endif
 
        if (!info->spare) {
-               info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+               info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
                                                          iter->cpu_file);
                if (IS_ERR(info->spare)) {
                        ret = PTR_ERR(info->spare);
@@ -7373,7 +7409,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
  again:
        trace_access_lock(iter->cpu_file);
-       ret = ring_buffer_read_page(iter->trace_buffer->buffer,
+       ret = ring_buffer_read_page(iter->array_buffer->buffer,
                                    &info->spare,
                                    count,
                                    iter->cpu_file, 0);
@@ -7423,7 +7459,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
        __trace_array_put(iter->tr);
 
        if (info->spare)
-               ring_buffer_free_read_page(iter->trace_buffer->buffer,
+               ring_buffer_free_read_page(iter->array_buffer->buffer,
                                           info->spare_cpu, info->spare);
        kfree(info);
 
@@ -7433,7 +7469,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 }
 
 struct buffer_ref {
-       struct ring_buffer      *buffer;
+       struct trace_buffer     *buffer;
        void                    *page;
        int                     cpu;
        refcount_t              refcount;
@@ -7528,7 +7564,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
  again:
        trace_access_lock(iter->cpu_file);
-       entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+       entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 
        for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
                struct page *page;
@@ -7541,7 +7577,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                }
 
                refcount_set(&ref->refcount, 1);
-               ref->buffer = iter->trace_buffer->buffer;
+               ref->buffer = iter->array_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (IS_ERR(ref->page)) {
                        ret = PTR_ERR(ref->page);
@@ -7569,7 +7605,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                spd.nr_pages++;
                *ppos += PAGE_SIZE;
 
-               entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+               entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
        }
 
        trace_access_unlock(iter->cpu_file);
@@ -7613,7 +7649,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 {
        struct inode *inode = file_inode(filp);
        struct trace_array *tr = inode->i_private;
-       struct trace_buffer *trace_buf = &tr->trace_buffer;
+       struct array_buffer *trace_buf = &tr->array_buffer;
        int cpu = tracing_get_cpu(inode);
        struct trace_seq *s;
        unsigned long cnt;
@@ -7894,7 +7930,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 
        tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
 
-       WARN_ONCE(!tr->percpu_dir,
+       MEM_FAIL(!tr->percpu_dir,
                  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
 
        return tr->percpu_dir;
@@ -8215,7 +8251,7 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
        for (cnt = 0; opts[cnt].name; cnt++) {
                create_trace_option_file(tr, &topts[cnt], flags,
                                         &opts[cnt]);
-               WARN_ONCE(topts[cnt].entry == NULL,
+               MEM_FAIL(topts[cnt].entry == NULL,
                          "Failed to create trace option: %s",
                          opts[cnt].name);
        }
@@ -8272,7 +8308,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
 {
        struct trace_array *tr = filp->private_data;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        unsigned long val;
        int ret;
 
@@ -8362,7 +8398,7 @@ static void
 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 
 static int
-allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
+allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
 {
        enum ring_buffer_flags rb_flags;
 
@@ -8382,8 +8418,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
        }
 
        /* Allocate the first page for all buffers */
-       set_buffer_entries(&tr->trace_buffer,
-                          ring_buffer_size(tr->trace_buffer.buffer, 0));
+       set_buffer_entries(&tr->array_buffer,
+                          ring_buffer_size(tr->array_buffer.buffer, 0));
 
        return 0;
 }
@@ -8392,18 +8428,18 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 {
        int ret;
 
-       ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+       ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
        if (ret)
                return ret;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        ret = allocate_trace_buffer(tr, &tr->max_buffer,
                                    allocate_snapshot ? size : 1);
-       if (WARN_ON(ret)) {
-               ring_buffer_free(tr->trace_buffer.buffer);
-               tr->trace_buffer.buffer = NULL;
-               free_percpu(tr->trace_buffer.data);
-               tr->trace_buffer.data = NULL;
+       if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
+               ring_buffer_free(tr->array_buffer.buffer);
+               tr->array_buffer.buffer = NULL;
+               free_percpu(tr->array_buffer.data);
+               tr->array_buffer.data = NULL;
                return -ENOMEM;
        }
        tr->allocated_snapshot = allocate_snapshot;
@@ -8417,7 +8453,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
        return 0;
 }
 
-static void free_trace_buffer(struct trace_buffer *buf)
+static void free_trace_buffer(struct array_buffer *buf)
 {
        if (buf->buffer) {
                ring_buffer_free(buf->buffer);
@@ -8432,7 +8468,7 @@ static void free_trace_buffers(struct trace_array *tr)
        if (!tr)
                return;
 
-       free_trace_buffer(&tr->trace_buffer);
+       free_trace_buffer(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
        free_trace_buffer(&tr->max_buffer);
@@ -8463,6 +8499,34 @@ static void update_tracer_options(struct trace_array *tr)
        mutex_unlock(&trace_types_lock);
 }
 
+/* Must have trace_types_lock held */
+struct trace_array *trace_array_find(const char *instance)
+{
+       struct trace_array *tr, *found = NULL;
+
+       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+               if (tr->name && strcmp(tr->name, instance) == 0) {
+                       found = tr;
+                       break;
+               }
+       }
+
+       return found;
+}
+
+struct trace_array *trace_array_find_get(const char *instance)
+{
+       struct trace_array *tr;
+
+       mutex_lock(&trace_types_lock);
+       tr = trace_array_find(instance);
+       if (tr)
+               tr->ref++;
+       mutex_unlock(&trace_types_lock);
+
+       return tr;
+}
+
 static struct trace_array *trace_array_create(const char *name)
 {
        struct trace_array *tr;
@@ -8504,7 +8568,7 @@ static struct trace_array *trace_array_create(const char *name)
 
        ret = event_trace_add_tracer(tr->dir, tr);
        if (ret) {
-               tracefs_remove_recursive(tr->dir);
+               tracefs_remove(tr->dir);
                goto out_free_tr;
        }
 
@@ -8539,10 +8603,8 @@ static int instance_mkdir(const char *name)
        mutex_lock(&trace_types_lock);
 
        ret = -EEXIST;
-       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-               if (tr->name && strcmp(tr->name, name) == 0)
-                       goto out_unlock;
-       }
+       if (trace_array_find(name))
+               goto out_unlock;
 
        tr = trace_array_create(name);
 
@@ -8564,6 +8626,10 @@ out_unlock:
  * NOTE: This function increments the reference counter associated with the
  * trace array returned. This makes sure it cannot be freed while in use.
  * Use trace_array_put() once the trace array is no longer needed.
+ * If the trace_array is to be freed, trace_array_destroy() needs to
+ * be called after the trace_array_put(), or simply let user space delete
+ * it from the tracefs instances directory. But until the
+ * trace_array_put() is called, user space can not delete it.
  *
  */
 struct trace_array *trace_array_get_by_name(const char *name)
@@ -8613,7 +8679,7 @@ static int __remove_instance(struct trace_array *tr)
        event_trace_del_tracer(tr);
        ftrace_clear_pids(tr);
        ftrace_destroy_function_files(tr);
-       tracefs_remove_recursive(tr->dir);
+       tracefs_remove(tr->dir);
        free_trace_buffers(tr);
 
        for (i = 0; i < tr->nr_topts; i++) {
@@ -8666,12 +8732,9 @@ static int instance_rmdir(const char *name)
        mutex_lock(&trace_types_lock);
 
        ret = -ENODEV;
-       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-               if (tr->name && strcmp(tr->name, name) == 0) {
-                       ret = __remove_instance(tr);
-                       break;
-               }
-       }
+       tr = trace_array_find(name);
+       if (tr)
+               ret = __remove_instance(tr);
 
        mutex_unlock(&trace_types_lock);
        mutex_unlock(&event_mutex);
@@ -8684,7 +8747,7 @@ static __init void create_trace_instances(struct dentry *d_tracer)
        trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
                                                         instance_mkdir,
                                                         instance_rmdir);
-       if (WARN_ON(!trace_instance_dir))
+       if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
                return;
 }
 
@@ -8754,7 +8817,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 #endif
 
        if (ftrace_create_function_files(tr, d_tracer))
-               WARN(1, "Could not allocate function filter files");
+               MEM_FAIL(1, "Could not allocate function filter files");
 
 #ifdef CONFIG_TRACER_SNAPSHOT
        trace_create_file("snapshot", 0644, d_tracer,
@@ -9036,13 +9099,13 @@ void trace_init_global_iter(struct trace_iterator *iter)
        iter->tr = &global_trace;
        iter->trace = iter->tr->current_trace;
        iter->cpu_file = RING_BUFFER_ALL_CPUS;
-       iter->trace_buffer = &global_trace.trace_buffer;
+       iter->array_buffer = &global_trace.array_buffer;
 
        if (iter->trace && iter->trace->open)
                iter->trace->open(iter);
 
        /* Annotate start of buffers if we had overruns */
-       if (ring_buffer_overruns(iter->trace_buffer->buffer))
+       if (ring_buffer_overruns(iter->array_buffer->buffer))
                iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -9083,7 +9146,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
        trace_init_global_iter(&iter);
 
        for_each_tracing_cpu(cpu) {
-               atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+               atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
        }
 
        old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -9151,7 +9214,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
        tr->trace_flags |= old_userobj;
 
        for_each_tracing_cpu(cpu) {
-               atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+               atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
        }
        atomic_dec(&dump_running);
        printk_nmi_direct_exit();
@@ -9306,8 +9369,7 @@ __init static int tracer_alloc_buffers(void)
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
        if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
-               printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
-               WARN_ON(1);
+               MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
                goto out_free_savedcmd;
        }
 
@@ -9380,7 +9442,8 @@ void __init early_trace_init(void)
        if (tracepoint_printk) {
                tracepoint_print_iter =
                        kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
-               if (WARN_ON(!tracepoint_print_iter))
+               if (MEM_FAIL(!tracepoint_print_iter,
+                            "Failed to allocate trace iterator\n"))
                        tracepoint_printk = 0;
                else
                        static_key_enable(&tracepoint_printk_key.key);
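
trace_array_find() is a bare lookup that requires trace_types_lock to be held,
while trace_array_find_get() wraps the lookup with the lock and takes a
reference on the instance it returns.  A minimal sketch of the intended use
from inside the tracing code (the instance name is illustrative; unlike
trace_array_get_by_name(), this does not create a missing instance):

        struct trace_array *tr;

        tr = trace_array_find_get("foo");       /* NULL if no such instance exists */
        if (!tr)
                return -ENOENT;

        /* ... tr->ref is held, so the instance cannot be freed here ... */

        trace_array_put(tr);
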
index a98dce1..99372dd 100644 (file)
@@ -93,6 +93,18 @@ enum trace_type {
 
 #include "trace_entries.h"
 
+/* Use this for memory failure errors */
+#define MEM_FAIL(condition, fmt, ...) ({                       \
+       static bool __section(.data.once) __warned;             \
+       int __ret_warn_once = !!(condition);                    \
+                                                               \
+       if (unlikely(__ret_warn_once && !__warned)) {           \
+               __warned = true;                                \
+               pr_err("ERROR: " fmt, ##__VA_ARGS__);           \
+       }                                                       \
+       unlikely(__ret_warn_once);                              \
+})
+
 /*
  * syscalls are special, and need special handling, this is why
  * they are not included in trace_entries.h
@@ -175,9 +187,9 @@ struct trace_array_cpu {
 struct tracer;
 struct trace_option_dentry;
 
-struct trace_buffer {
+struct array_buffer {
        struct trace_array              *tr;
-       struct ring_buffer              *buffer;
+       struct trace_buffer             *buffer;
        struct trace_array_cpu __percpu *data;
        u64                             time_start;
        int                             cpu;
@@ -248,7 +260,7 @@ struct cond_snapshot {
 struct trace_array {
        struct list_head        list;
        char                    *name;
-       struct trace_buffer     trace_buffer;
+       struct array_buffer     array_buffer;
 #ifdef CONFIG_TRACER_MAX_TRACE
        /*
         * The max_buffer is used to snapshot the trace when a maximum
@@ -256,12 +268,12 @@ struct trace_array {
         * Some tracers will use this to store a maximum trace while
         * it continues examining live traces.
         *
-        * The buffers for the max_buffer are set up the same as the trace_buffer
+        * The buffers for the max_buffer are set up the same as the array_buffer
         * When a snapshot is taken, the buffer of the max_buffer is swapped
-        * with the buffer of the trace_buffer and the buffers are reset for
-        * the trace_buffer so the tracing can continue.
+        * with the buffer of the array_buffer and the buffers are reset for
+        * the array_buffer so the tracing can continue.
         */
-       struct trace_buffer     max_buffer;
+       struct array_buffer     max_buffer;
        bool                    allocated_snapshot;
 #endif
 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
@@ -345,6 +357,8 @@ extern struct mutex trace_types_lock;
 
 extern int trace_array_get(struct trace_array *tr);
 extern int tracing_check_open_get_tr(struct trace_array *tr);
+extern struct trace_array *trace_array_find(const char *instance);
+extern struct trace_array *trace_array_find_get(const char *instance);
 
 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -684,7 +698,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -704,7 +718,7 @@ struct dentry *tracing_init_dentry(void);
 struct ring_buffer_event;
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags,
@@ -716,7 +730,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts);
 
-void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
                                        struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
@@ -872,7 +886,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 extern int
 trace_array_vprintk(struct trace_array *tr,
                    unsigned long ip, const char *fmt, va_list args);
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
                           unsigned long ip, const char *fmt, ...);
 void trace_printk_seq(struct trace_seq *s);
 enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -949,22 +963,31 @@ extern void __trace_graph_return(struct trace_array *tr,
                                 unsigned long flags, int pc);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-extern struct ftrace_hash *ftrace_graph_hash;
-extern struct ftrace_hash *ftrace_graph_notrace_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
 
 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 {
        unsigned long addr = trace->func;
        int ret = 0;
+       struct ftrace_hash *hash;
 
        preempt_disable_notrace();
 
-       if (ftrace_hash_empty(ftrace_graph_hash)) {
+       /*
+        * Have to open code "rcu_dereference_sched()" because the
+        * function graph tracer can be called when RCU is not
+        * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
+        */
+       hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
+
+       if (ftrace_hash_empty(hash)) {
                ret = 1;
                goto out;
        }
 
-       if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+       if (ftrace_lookup_ip(hash, addr)) {
 
                /*
                 * This needs to be cleared on the return functions
@@ -1000,10 +1023,20 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
        int ret = 0;
+       struct ftrace_hash *notrace_hash;
 
        preempt_disable_notrace();
 
-       if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
+       /*
+        * Have to open code "rcu_dereference_sched()" because the
+        * function graph tracer can be called when RCU is not
+        * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
+        */
+       notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                                                !preemptible());
+
+       if (ftrace_lookup_ip(notrace_hash, addr))
                ret = 1;
 
        preempt_enable_notrace();
@@ -1056,7 +1089,7 @@ struct ftrace_func_command {
 extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
-       return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+       return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
@@ -1144,6 +1177,11 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd);
 void ftrace_create_filter_files(struct ftrace_ops *ops,
                                struct dentry *parent);
 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+
+extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+                            int len, int reset);
+extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+                             int len, int reset);
 #else
 struct ftrace_func_command;
 
@@ -1366,17 +1404,17 @@ struct trace_subsystem_dir {
 };
 
 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
-                                    struct ring_buffer *buffer,
+                                    struct trace_buffer *buffer,
                                     struct ring_buffer_event *event);
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-                                    struct ring_buffer *buffer,
+                                    struct trace_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
-                                             struct ring_buffer *buffer,
+                                             struct trace_buffer *buffer,
                                              struct ring_buffer_event *event,
                                              unsigned long flags, int pc)
 {
@@ -1389,7 +1427,7 @@ void trace_buffered_event_disable(void);
 void trace_buffered_event_enable(void);
 
 static inline void
-__trace_event_discard_commit(struct ring_buffer *buffer,
+__trace_event_discard_commit(struct trace_buffer *buffer,
                             struct ring_buffer_event *event)
 {
        if (this_cpu_read(trace_buffered_event) == event) {
@@ -1415,7 +1453,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer,
  */
 static inline bool
 __event_trigger_test_discard(struct trace_event_file *file,
-                            struct ring_buffer *buffer,
+                            struct trace_buffer *buffer,
                             struct ring_buffer_event *event,
                             void *entry,
                             enum event_trigger_type *tt)
@@ -1450,7 +1488,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
  */
 static inline void
 event_trigger_unlock_commit(struct trace_event_file *file,
-                           struct ring_buffer *buffer,
+                           struct trace_buffer *buffer,
                            struct ring_buffer_event *event,
                            void *entry, unsigned long irq_flags, int pc)
 {
@@ -1481,7 +1519,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
  */
 static inline void
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
-                                struct ring_buffer *buffer,
+                                struct trace_buffer *buffer,
                                 struct ring_buffer_event *event,
                                 void *entry, unsigned long irq_flags, int pc,
                                 struct pt_regs *regs)
@@ -1892,6 +1930,15 @@ void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
 
+/* Used from boot time tracer */
+extern int trace_set_options(struct trace_array *tr, char *option);
+extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
+extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+                                         unsigned long size, int cpu_id);
+extern int tracing_set_cpumask(struct trace_array *tr,
+                               cpumask_var_t tracing_cpumask_new);
+
+
 #define MAX_EVENT_NAME_LEN     64
 
 extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
@@ -1949,6 +1996,9 @@ static inline const char *get_syscall_name(int syscall)
 #ifdef CONFIG_EVENT_TRACING
 void trace_event_init(void);
 void trace_event_eval_update(struct trace_eval_map **map, int len);
+/* Used from boot time tracer */
+extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
+extern int trigger_process_regex(struct trace_event_file *file, char *buff);
 #else
 static inline void __init trace_event_init(void) { }
 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
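
The WARN_ON()/WARN() conversions in trace.c above rely on the MEM_FAIL() macro
defined here: it prints "ERROR: ..." via pr_err() only once per call site
(guarded by the static __warned flag) and evaluates to the truth value of the
condition, so it drops straight into an if().  A minimal, illustrative
allocation check in that style (the field being allocated is an assumption for
the example):

        buf->data = alloc_percpu(struct trace_array_cpu);
        if (MEM_FAIL(!buf->data, "Failed to allocate per-cpu buffer data\n"))
                return -ENOMEM;
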
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
new file mode 100644 (file)
index 0000000..06d7feb
--- /dev/null
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace_boot.c
+ * Tracing kernel boot-time
+ */
+
+#define pr_fmt(fmt)    "trace_boot: " fmt
+
+#include <linux/bootconfig.h>
+#include <linux/cpumask.h>
+#include <linux/ftrace.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/trace.h>
+#include <linux/trace_events.h>
+
+#include "trace.h"
+
+#define MAX_BUF_LEN 256
+
+static void __init
+trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
+{
+       struct xbc_node *anode;
+       const char *p;
+       char buf[MAX_BUF_LEN];
+       unsigned long v = 0;
+
+       /* Common ftrace options */
+       xbc_node_for_each_array_value(node, "options", anode, p) {
+               if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+                       pr_err("String is too long: %s\n", p);
+                       continue;
+               }
+
+               if (trace_set_options(tr, buf) < 0)
+                       pr_err("Failed to set option: %s\n", buf);
+       }
+
+       p = xbc_node_find_value(node, "trace_clock", NULL);
+       if (p && *p != '\0') {
+               if (tracing_set_clock(tr, p) < 0)
+                       pr_err("Failed to set trace clock: %s\n", p);
+       }
+
+       p = xbc_node_find_value(node, "buffer_size", NULL);
+       if (p && *p != '\0') {
+               v = memparse(p, NULL);
+               if (v < PAGE_SIZE)
+                       pr_err("Buffer size is too small: %s\n", p);
+               if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
+                       pr_err("Failed to resize trace buffer to %s\n", p);
+       }
+
+       p = xbc_node_find_value(node, "cpumask", NULL);
+       if (p && *p != '\0') {
+               cpumask_var_t new_mask;
+
+               if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+                       if (cpumask_parse(p, new_mask) < 0 ||
+                           tracing_set_cpumask(tr, new_mask) < 0)
+                               pr_err("Failed to set new CPU mask %s\n", p);
+                       free_cpumask_var(new_mask);
+               }
+       }
+}
+
+#ifdef CONFIG_EVENT_TRACING
+static void __init
+trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
+{
+       struct xbc_node *anode;
+       char buf[MAX_BUF_LEN];
+       const char *p;
+
+       xbc_node_for_each_array_value(node, "events", anode, p) {
+               if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+                       pr_err("String is too long: %s\n", p);
+                       continue;
+               }
+
+               if (ftrace_set_clr_event(tr, buf, 1) < 0)
+                       pr_err("Failed to enable event: %s\n", p);
+       }
+}
+
+#ifdef CONFIG_KPROBE_EVENTS
+static int __init
+trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
+{
+       struct dynevent_cmd cmd;
+       struct xbc_node *anode;
+       char buf[MAX_BUF_LEN];
+       const char *val;
+       int ret;
+
+       kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+
+       ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
+       if (ret)
+               return ret;
+
+       xbc_node_for_each_array_value(node, "probes", anode, val) {
+               ret = kprobe_event_add_field(&cmd, val);
+               if (ret)
+                       return ret;
+       }
+
+       ret = kprobe_event_gen_cmd_end(&cmd);
+       if (ret)
+               pr_err("Failed to add probe: %s\n", buf);
+
+       return ret;
+}
+#else
+static inline int __init
+trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
+{
+       pr_err("Kprobe event is not supported.\n");
+       return -ENOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_HIST_TRIGGERS
+static int __init
+trace_boot_add_synth_event(struct xbc_node *node, const char *event)
+{
+       struct dynevent_cmd cmd;
+       struct xbc_node *anode;
+       char buf[MAX_BUF_LEN];
+       const char *p;
+       int ret;
+
+       synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+
+       ret = synth_event_gen_cmd_start(&cmd, event, NULL);
+       if (ret)
+               return ret;
+
+       xbc_node_for_each_array_value(node, "fields", anode, p) {
+               ret = synth_event_add_field_str(&cmd, p);
+               if (ret)
+                       return ret;
+       }
+
+       ret = synth_event_gen_cmd_end(&cmd);
+       if (ret < 0)
+               pr_err("Failed to add synthetic event: %s\n", buf);
+
+       return ret;
+}
+#else
+static inline int __init
+trace_boot_add_synth_event(struct xbc_node *node, const char *event)
+{
+       pr_err("Synthetic event is not supported.\n");
+       return -ENOTSUPP;
+}
+#endif
+
+static void __init
+trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
+                         struct xbc_node *enode)
+{
+       struct trace_event_file *file;
+       struct xbc_node *anode;
+       char buf[MAX_BUF_LEN];
+       const char *p, *group, *event;
+
+       group = xbc_node_get_data(gnode);
+       event = xbc_node_get_data(enode);
+
+       if (!strcmp(group, "kprobes"))
+               if (trace_boot_add_kprobe_event(enode, event) < 0)
+                       return;
+       if (!strcmp(group, "synthetic"))
+               if (trace_boot_add_synth_event(enode, event) < 0)
+                       return;
+
+       mutex_lock(&event_mutex);
+       file = find_event_file(tr, group, event);
+       if (!file) {
+               pr_err("Failed to find event: %s:%s\n", group, event);
+               goto out;
+       }
+
+       p = xbc_node_find_value(enode, "filter", NULL);
+       if (p && *p != '\0') {
+               if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+                       pr_err("filter string is too long: %s\n", p);
+               else if (apply_event_filter(file, buf) < 0)
+                       pr_err("Failed to apply filter: %s\n", buf);
+       }
+
+       xbc_node_for_each_array_value(enode, "actions", anode, p) {
+               if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+                       pr_err("action string is too long: %s\n", p);
+               else if (trigger_process_regex(file, buf) < 0)
+                       pr_err("Failed to apply an action: %s\n", buf);
+       }
+
+       if (xbc_node_find_value(enode, "enable", NULL)) {
+               if (trace_event_enable_disable(file, 1, 0) < 0)
+                       pr_err("Failed to enable event node: %s:%s\n",
+                               group, event);
+       }
+out:
+       mutex_unlock(&event_mutex);
+}
+
+static void __init
+trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
+{
+       struct xbc_node *gnode, *enode;
+
+       node = xbc_node_find_child(node, "event");
+       if (!node)
+               return;
+       /* per-event key starts with "event.GROUP.EVENT" */
+       xbc_node_for_each_child(node, gnode)
+               xbc_node_for_each_child(gnode, enode)
+                       trace_boot_init_one_event(tr, gnode, enode);
+}
+#else
+#define trace_boot_enable_events(tr, node) do {} while (0)
+#define trace_boot_init_events(tr, node) do {} while (0)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void __init
+trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
+{
+       struct xbc_node *anode;
+       const char *p;
+       char *q;
+
+       xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
+               q = kstrdup(p, GFP_KERNEL);
+               if (!q)
+                       return;
+               if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
+                       pr_err("Failed to add %s to ftrace filter\n", p);
+               else
+                       ftrace_filter_param = true;
+               kfree(q);
+       }
+       xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
+               q = kstrdup(p, GFP_KERNEL);
+               if (!q)
+                       return;
+               if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
+                       pr_err("Failed to add %s to ftrace notrace\n", p);
+               else
+                       ftrace_filter_param = true;
+               kfree(q);
+       }
+}
+#else
+#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
+#endif
+
+static void __init
+trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
+{
+       const char *p;
+
+       trace_boot_set_ftrace_filter(tr, node);
+
+       p = xbc_node_find_value(node, "tracer", NULL);
+       if (p && *p != '\0') {
+               if (tracing_set_tracer(tr, p) < 0)
+                       pr_err("Failed to set given tracer: %s\n", p);
+       }
+}
+
+static void __init
+trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
+{
+       trace_boot_set_instance_options(tr, node);
+       trace_boot_init_events(tr, node);
+       trace_boot_enable_events(tr, node);
+       trace_boot_enable_tracer(tr, node);
+}
+
+static void __init
+trace_boot_init_instances(struct xbc_node *node)
+{
+       struct xbc_node *inode;
+       struct trace_array *tr;
+       const char *p;
+
+       node = xbc_node_find_child(node, "instance");
+       if (!node)
+               return;
+
+       xbc_node_for_each_child(node, inode) {
+               p = xbc_node_get_data(inode);
+               if (!p || *p == '\0')
+                       continue;
+
+               tr = trace_array_get_by_name(p);
+               if (!tr) {
+                       pr_err("Failed to get trace instance %s\n", p);
+                       continue;
+               }
+               trace_boot_init_one_instance(tr, inode);
+               trace_array_put(tr);
+       }
+}
+
+static int __init trace_boot_init(void)
+{
+       struct xbc_node *trace_node;
+       struct trace_array *tr;
+
+       trace_node = xbc_find_node("ftrace");
+       if (!trace_node)
+               return 0;
+
+       tr = top_trace_array();
+       if (!tr)
+               return 0;
+
+       /* Global trace array is also one instance */
+       trace_boot_init_one_instance(tr, trace_node);
+       trace_boot_init_instances(trace_node);
+
+       return 0;
+}
+
+fs_initcall(trace_boot_init);
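
The keys parsed above map onto boot configuration entries of roughly the
following shape (a hedged sketch: only the key names come from the parser
above; the tracer, event, probe and instance names are placeholders):

        ftrace.tracer = function_graph
        ftrace.options = sym-addr
        ftrace.ftrace.filters = "vfs_*"
        ftrace.event.sched.sched_switch.enable
        ftrace.event.kprobes.myprobe.probes = "vfs_read $arg1"
        ftrace.instance.foo.buffer_size = 1MB
        ftrace.instance.foo.event.synthetic.myevent.fields = "u64 lat"
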
index 88e158d..eff0991 100644 (file)
@@ -32,10 +32,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
        struct trace_event_call *call = &event_branch;
        struct trace_array *tr = branch_tracer;
+       struct trace_buffer *buffer;
        struct trace_array_cpu *data;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
-       struct ring_buffer *buffer;
        unsigned long flags;
        int pc;
        const char *p;
@@ -55,12 +55,12 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 
        raw_local_irq_save(flags);
        current->trace_recursion |= TRACE_BRANCH_BIT;
-       data = this_cpu_ptr(tr->trace_buffer.data);
+       data = this_cpu_ptr(tr->array_buffer.data);
        if (atomic_read(&data->disabled))
                goto out;
 
        pc = preempt_count();
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                          sizeof(*entry), flags, pc);
        if (!event)
index 89779eb..9f2e852 100644 (file)
@@ -223,3 +223,215 @@ static __init int init_dynamic_event(void)
        return 0;
 }
 fs_initcall(init_dynamic_event);
+
+/**
+ * dynevent_arg_add - Add an arg to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @arg: The argument to append to the current cmd
+ * @check_arg: An (optional) pointer to a function checking arg sanity
+ *
+ * Append an argument to a dynevent_cmd.  The argument string will be
+ * appended to the current cmd string, followed by a separator, if
+ * applicable.  Before the argument is added, the @check_arg function,
+ * if present, will be used to check the sanity of the current arg
+ * string.
+ *
+ * The cmd string and separator should be set using the
+ * dynevent_arg_init() before any arguments are added using this
+ * function.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_arg_add(struct dynevent_cmd *cmd,
+                    struct dynevent_arg *arg,
+                    dynevent_check_arg_fn_t check_arg)
+{
+       int ret = 0;
+
+       if (check_arg) {
+               ret = check_arg(arg);
+               if (ret)
+                       return ret;
+       }
+
+       ret = seq_buf_printf(&cmd->seq, " %s%c", arg->str, arg->separator);
+       if (ret) {
+               pr_err("String is too long: %s%c\n", arg->str, arg->separator);
+               return -E2BIG;
+       }
+
+       return ret;
+}
+
+/**
+ * dynevent_arg_pair_add - Add an arg pair to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @arg_pair: The argument pair to append to the current cmd
+ * @check_arg: An (optional) pointer to a function checking arg sanity
+ *
+ * Append an argument pair to a dynevent_cmd.  An argument pair
+ * consists of a left-hand-side argument and a right-hand-side
+ * argument separated by an operator, which can be whitespace, all
+ * followed by a separator, if applicable.  This can be used to add
+ * arguments of the form 'type variable_name;' or 'x+y'.
+ *
+ * The lhs argument string will be appended to the current cmd string,
+ * followed by an operator, if applicable, followed by the rhs string,
+ * followed finally by a separator, if applicable.  Before the
+ * argument is added, the @check_arg function, if present, will be
+ * used to check the sanity of the current arg strings.
+ *
+ * The cmd strings, operator, and separator should be set using the
+ * dynevent_arg_pair_init() before any arguments are added using this
+ * function.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
+                         struct dynevent_arg_pair *arg_pair,
+                         dynevent_check_arg_fn_t check_arg)
+{
+       int ret = 0;
+
+       if (check_arg) {
+               ret = check_arg(arg_pair);
+               if (ret)
+                       return ret;
+       }
+
+       ret = seq_buf_printf(&cmd->seq, " %s%c%s%c", arg_pair->lhs,
+                            arg_pair->operator, arg_pair->rhs,
+                            arg_pair->separator);
+       if (ret) {
+               pr_err("field string is too long: %s%c%s%c\n", arg_pair->lhs,
+                      arg_pair->operator, arg_pair->rhs,
+                      arg_pair->separator);
+               return -E2BIG;
+       }
+
+       return ret;
+}
+
+/**
+ * dynevent_str_add - Add a string to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @str: The string to append to the current cmd
+ *
+ * Append a string to a dynevent_cmd.  The string will be appended to
+ * the current cmd string as-is, with nothing prepended or appended.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_str_add(struct dynevent_cmd *cmd, const char *str)
+{
+       int ret = 0;
+
+       ret = seq_buf_puts(&cmd->seq, str);
+       if (ret) {
+               pr_err("String is too long: %s\n", str);
+               return -E2BIG;
+       }
+
+       return ret;
+}
+
+/**
+ * dynevent_cmd_init - Initialize a dynevent_cmd object
+ * @cmd: A pointer to the dynevent_cmd struct representing the cmd
+ * @buf: A pointer to the buffer to generate the command into
+ * @maxlen: The length of the buffer the command will be generated into
+ * @type: The type of the cmd, checked against further operations
+ * @run_command: The type-specific function that will actually run the command
+ *
+ * Initialize a dynevent_cmd.  A dynevent_cmd is used to build up and
+ * run dynamic event creation commands, such as commands for creating
+ * synthetic and kprobe events.  Before calling any of the functions
+ * used to build the command, a dynevent_cmd object should be
+ * instantiated and initialized using this function.
+ *
+ * The initialization sets things up by saving a pointer to the
+ * user-supplied buffer and its length via the @buf and @maxlen
+ * params, and by saving the cmd-specific @type and @run_command
+ * params which are used to check subsequent dynevent_cmd operations
+ * and actually run the command when complete.
+ */
+void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
+                      enum dynevent_type type,
+                      dynevent_create_fn_t run_command)
+{
+       memset(cmd, '\0', sizeof(*cmd));
+
+       seq_buf_init(&cmd->seq, buf, maxlen);
+       cmd->type = type;
+       cmd->run_command = run_command;
+}
+
+/**
+ * dynevent_arg_init - Initialize a dynevent_arg object
+ * @arg: A pointer to the dynevent_arg struct representing the arg
+ * @separator: An (optional) separator, appended after adding the arg
+ *
+ * Initialize a dynevent_arg object.  A dynevent_arg represents an
+ * object used to append single arguments to the current command
+ * string.  After the arg string is successfully appended to the
+ * command string, the optional @separator is appended.  If no
+ * separator was specified when initializing the arg, a space will be
+ * appended.
+ */
+void dynevent_arg_init(struct dynevent_arg *arg,
+                      char separator)
+{
+       memset(arg, '\0', sizeof(*arg));
+
+       if (!separator)
+               separator = ' ';
+       arg->separator = separator;
+}
+
+/**
+ * dynevent_arg_pair_init - Initialize a dynevent_arg_pair object
+ * @arg_pair: A pointer to the dynevent_arg_pair struct representing the arg
+ * @operator: An (optional) operator, appended after adding the first arg
+ * @separator: An (optional) separator, appended after adding the second arg
+ *
+ * Initialize a dynevent_arg_pair object.  A dynevent_arg_pair
+ * represents an object used to append argument pairs such as 'type
+ * variable_name;' or 'x+y' to the current command string.  An
+ * argument pair consists of a left-hand-side argument and a
+ * right-hand-side argument separated by an operator, which can be
+ * whitespace, all followed by a separator, if applicable.  After the
+ * first arg string is successfully appended to the command string,
+ * the optional @operator is appended, followed by the second arg and
+ * an optional @separator.  If no separator was specified when
+ * initializing the arg, a space will be appended.
+ */
+void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
+                           char operator, char separator)
+{
+       memset(arg_pair, '\0', sizeof(*arg_pair));
+
+       if (!operator)
+               operator = ' ';
+       arg_pair->operator = operator;
+
+       if (!separator)
+               separator = ' ';
+       arg_pair->separator = separator;
+}
+
+/**
+ * dynevent_create - Create the dynamic event contained in dynevent_cmd
+ * @cmd: The dynevent_cmd object containing the dynamic event creation command
+ *
+ * Once a dynevent_cmd object has been successfully built up via the
+ * dynevent_cmd_init(), dynevent_arg_add() and dynevent_arg_pair_add()
+ * functions, this function runs the final command to actually create
+ * the event.
+ *
+ * Return: 0 if the event was successfully created, error otherwise.
+ */
+int dynevent_create(struct dynevent_cmd *cmd)
+{
+       return cmd->run_command(cmd);
+}
+EXPORT_SYMBOL_GPL(dynevent_create);
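
Taken together, these helpers build up a command string piece by piece and then
hand it to the type-specific callback.  A minimal sketch under stated
assumptions (example_run(), example_build() and the buffer size are
hypothetical; real users normally go through the synth/kprobe wrappers such as
synth_event_cmd_init(), which pick the type and run callback for you):

        /* hypothetical callback: a real one would parse and register cmd->seq.buffer */
        static int example_run(struct dynevent_cmd *cmd)
        {
                return 0;
        }

        static int example_build(void)
        {
                char buf[256];
                struct dynevent_cmd cmd;
                struct dynevent_arg arg;
                struct dynevent_arg_pair pair;
                int ret;

                dynevent_cmd_init(&cmd, buf, sizeof(buf), DYNEVENT_TYPE_SYNTH, example_run);

                dynevent_arg_init(&arg, 0);             /* default separator is a space */
                arg.str = "myevent";
                ret = dynevent_arg_add(&cmd, &arg, NULL);
                if (ret)
                        return ret;

                dynevent_arg_pair_init(&pair, 0, ';');  /* appends "u64 lat;" */
                pair.lhs = "u64";
                pair.rhs = "lat";
                ret = dynevent_arg_pair_add(&cmd, &pair, NULL);
                if (ret)
                        return ret;

                return dynevent_create(&cmd);           /* invokes example_run(&cmd) */
        }
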
index 4689813..d6857a2 100644 (file)
@@ -117,4 +117,36 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type);
 #define for_each_dyn_event_safe(pos, n)        \
        list_for_each_entry_safe(pos, n, &dyn_event_list, list)
 
+extern void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
+                             enum dynevent_type type,
+                             dynevent_create_fn_t run_command);
+
+typedef int (*dynevent_check_arg_fn_t)(void *data);
+
+struct dynevent_arg {
+       const char              *str;
+       char                    separator; /* e.g. ';', ',', or nothing */
+};
+
+extern void dynevent_arg_init(struct dynevent_arg *arg,
+                             char separator);
+extern int dynevent_arg_add(struct dynevent_cmd *cmd,
+                           struct dynevent_arg *arg,
+                           dynevent_check_arg_fn_t check_arg);
+
+struct dynevent_arg_pair {
+       const char              *lhs;
+       const char              *rhs;
+       char                    operator; /* e.g. '=' or nothing */
+       char                    separator; /* e.g. ';', ',', or nothing */
+};
+
+extern void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
+                                  char operator, char separator);
+
+extern int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
+                                struct dynevent_arg_pair *arg_pair,
+                                dynevent_check_arg_fn_t check_arg);
+extern int dynevent_str_add(struct dynevent_cmd *cmd, const char *str);
+
 #endif
index 3e9d816..f22746f 100644 (file)
@@ -164,7 +164,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
 
        F_STRUCT(
                __field(        int,            size    )
-               __dynamic_array(unsigned long,  caller  )
+               __array(        unsigned long,  caller, FTRACE_STACK_ENTRIES    )
        ),
 
        F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
index c8622a4..f38234e 100644 (file)
@@ -238,7 +238,7 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
        if (!pid_list)
                return false;
 
-       data = this_cpu_ptr(tr->trace_buffer.data);
+       data = this_cpu_ptr(tr->array_buffer.data);
 
        return data->ignore_pid;
 }
@@ -273,6 +273,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
        if (!fbuffer->event)
                return NULL;
 
+       fbuffer->regs = NULL;
        fbuffer->entry = ring_buffer_event_data(fbuffer->event);
        return fbuffer->entry;
 }
@@ -547,7 +548,7 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
 
-       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+       this_cpu_write(tr->array_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, prev) &&
                       trace_ignore_this_task(pid_list, next));
 }
@@ -561,7 +562,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
 
-       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+       this_cpu_write(tr->array_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, next));
 }
 
@@ -572,12 +573,12 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
        struct trace_pid_list *pid_list;
 
        /* Nothing to do if we are already tracing */
-       if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+       if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
                return;
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
 
-       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+       this_cpu_write(tr->array_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, task));
 }
 
@@ -588,13 +589,13 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
        struct trace_pid_list *pid_list;
 
        /* Nothing to do if we are not tracing */
-       if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+       if (this_cpu_read(tr->array_buffer.data->ignore_pid))
                return;
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
 
        /* Set tracing if current is enabled */
-       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+       this_cpu_write(tr->array_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, current));
 }
 
@@ -626,7 +627,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
        }
 
        for_each_possible_cpu(cpu)
-               per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+               per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
 
        rcu_assign_pointer(tr->filtered_pids, NULL);
 
@@ -698,7 +699,7 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
                return;
 
        if (!--dir->nr_events) {
-               tracefs_remove_recursive(dir->entry);
+               tracefs_remove(dir->entry);
                list_del(&dir->list);
                __put_system_dir(dir);
        }
@@ -717,7 +718,7 @@ static void remove_event_file_dir(struct trace_event_file *file)
                }
                spin_unlock(&dir->d_lock);
 
-               tracefs_remove_recursive(dir);
+               tracefs_remove(dir);
        }
 
        list_del(&file->list);
@@ -1595,7 +1596,7 @@ static void ignore_task_cpu(void *data)
        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             mutex_is_locked(&event_mutex));
 
-       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+       this_cpu_write(tr->array_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, current));
 }
 
@@ -2553,6 +2554,91 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
        return file;
 }
 
+/**
+ * trace_get_event_file - Find and return a trace event file
+ * @instance: The name of the trace instance containing the event
+ * @system: The name of the system containing the event
+ * @event: The name of the event
+ *
+ * Return a trace event file given the trace instance name, trace
+ * system, and trace event name.  If the instance name is NULL, it
+ * refers to the top-level trace array.
+ *
+ * This function will look it up and return it if found, after calling
+ * trace_array_get() to prevent the instance from going away, and
+ * increment the event's module refcount to prevent it from being
+ * removed.
+ *
+ * To release the file, call trace_put_event_file(), which will call
+ * trace_array_put() and decrement the event's module refcount.
+ *
+ * Return: The trace event file on success, ERR_PTR otherwise.
+ */
+struct trace_event_file *trace_get_event_file(const char *instance,
+                                             const char *system,
+                                             const char *event)
+{
+       struct trace_array *tr = top_trace_array();
+       struct trace_event_file *file = NULL;
+       int ret = -EINVAL;
+
+       if (instance) {
+               tr = trace_array_find_get(instance);
+               if (!tr)
+                       return ERR_PTR(-ENOENT);
+       } else {
+               ret = trace_array_get(tr);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
+
+       mutex_lock(&event_mutex);
+
+       file = find_event_file(tr, system, event);
+       if (!file) {
+               trace_array_put(tr);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Don't let event modules unload while in use */
+       ret = try_module_get(file->event_call->mod);
+       if (!ret) {
+               trace_array_put(tr);
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = 0;
+ out:
+       mutex_unlock(&event_mutex);
+
+       if (ret)
+               file = ERR_PTR(ret);
+
+       return file;
+}
+EXPORT_SYMBOL_GPL(trace_get_event_file);
+
+/**
+ * trace_put_event_file - Release a file from trace_get_event_file()
+ * @file: The trace event file
+ *
+ * If a file was retrieved using trace_get_event_file(), this should
+ * be called when it's no longer needed.  It will cancel the previous
+ * trace_array_get() called by that function, and decrement the
+ * event's module refcount.
+ */
+void trace_put_event_file(struct trace_event_file *file)
+{
+       mutex_lock(&event_mutex);
+       module_put(file->event_call->mod);
+       mutex_unlock(&event_mutex);
+
+       trace_array_put(file->tr);
+}
+EXPORT_SYMBOL_GPL(trace_put_event_file);
+
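
A minimal sketch of the get/put pairing described above (a NULL instance name
selects the top-level trace array; "sched"/"sched_switch" are only an
illustrative system and event):

        struct trace_event_file *file;

        file = trace_get_event_file(NULL, "sched", "sched_switch");
        if (IS_ERR(file))
                return PTR_ERR(file);

        /* ... use the file, e.g. as a target for a trigger ... */

        trace_put_event_file(file);
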
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 /* Avoid typos */
@@ -3082,7 +3168,7 @@ int event_trace_del_tracer(struct trace_array *tr)
 
        down_write(&trace_event_sem);
        __trace_remove_event_dirs(tr);
-       tracefs_remove_recursive(tr->event_dir);
+       tracefs_remove(tr->event_dir);
        up_write(&trace_event_sem);
 
        tr->event_dir = NULL;
@@ -3409,8 +3495,8 @@ static void __init
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
+       struct trace_buffer *buffer;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
index f2896d1..e7ce7cd 100644 (file)
        C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"),     \
        C(INVALID_REF_KEY,      "Using variable references in keys not supported"), \
        C(VAR_NOT_FOUND,        "Couldn't find variable"),              \
-       C(FIELD_NOT_FOUND,      "Couldn't find field"),
+       C(FIELD_NOT_FOUND,      "Couldn't find field"),                 \
+       C(EMPTY_ASSIGNMENT,     "Empty assignment"),                    \
+       C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),               \
+       C(EMPTY_SORT_FIELD,     "Empty sort field"),                    \
+       C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"),      \
+       C(INVALID_SORT_FIELD,   "Sort field must be a key or a val"),
 
 #undef C
 #define C(a, b)                HIST_ERR_##a
@@ -375,7 +380,7 @@ struct hist_trigger_data {
        unsigned int                    n_save_var_str;
 };
 
-static int synth_event_create(int argc, const char **argv);
+static int create_synth_event(int argc, const char **argv);
 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
 static int synth_event_release(struct dyn_event *ev);
 static bool synth_event_is_busy(struct dyn_event *ev);
@@ -383,7 +388,7 @@ static bool synth_event_match(const char *system, const char *event,
                        int argc, const char **argv, struct dyn_event *ev);
 
 static struct dyn_event_operations synth_event_ops = {
-       .create = synth_event_create,
+       .create = create_synth_event,
        .show = synth_event_show,
        .is_busy = synth_event_is_busy,
        .free = synth_event_release,
@@ -394,6 +399,7 @@ struct synth_field {
        char *type;
        char *name;
        size_t size;
+       unsigned int offset;
        bool is_signed;
        bool is_string;
 };
@@ -408,6 +414,7 @@ struct synth_event {
        struct trace_event_class                class;
        struct trace_event_call                 call;
        struct tracepoint                       *tp;
+       struct module                           *mod;
 };
 
 static bool is_synth_event(struct dyn_event *ev)
@@ -470,11 +477,12 @@ struct action_data {
         * When a histogram trigger is hit, the values of any
         * references to variables, including variables being passed
         * as parameters to synthetic events, are collected into a
-        * var_ref_vals array.  This var_ref_idx is the index of the
-        * first param in the array to be passed to the synthetic
-        * event invocation.
+        * var_ref_vals array.  This var_ref_idx array is an array of
+        * indices into the var_ref_vals array, one for each synthetic
+        * event param, and is passed to the synthetic event
+        * invocation.
         */
-       unsigned int            var_ref_idx;
+       unsigned int            var_ref_idx[TRACING_MAP_VARS_MAX];
        struct synth_event      *synth_event;
        bool                    use_trace_keyword;
        char                    *synth_event_name;
@@ -608,7 +616,8 @@ static void last_cmd_set(struct trace_event_file *file, char *str)
        if (!str)
                return;
 
-       strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
+       strcpy(last_cmd, "hist:");
+       strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));
 
        if (file) {
                call = file->event_call;
@@ -662,6 +671,8 @@ static int synth_event_define_fields(struct trace_event_call *call)
                if (ret)
                        break;
 
+               event->fields[i]->offset = n_u64;
+
                if (event->fields[i]->is_string) {
                        offset += STR_VAR_LEN_MAX;
                        n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
@@ -834,7 +845,7 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
                fmt = synth_field_fmt(se->fields[i]->type);
 
                /* parameter types */
-               if (tr->trace_flags & TRACE_ITER_VERBOSE)
+               if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
                        trace_seq_printf(s, "%s ", fmt);
 
                snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
@@ -875,14 +886,14 @@ static struct trace_event_functions synth_event_funcs = {
 
 static notrace void trace_event_raw_event_synth(void *__data,
                                                u64 *var_ref_vals,
-                                               unsigned int var_ref_idx)
+                                               unsigned int *var_ref_idx)
 {
        struct trace_event_file *trace_file = __data;
        struct synth_trace_event *entry;
        struct trace_event_buffer fbuffer;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        struct synth_event *event;
-       unsigned int i, n_u64;
+       unsigned int i, n_u64, val_idx;
        int fields_size = 0;
 
        event = trace_file->event_call->data;
@@ -896,7 +907,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
         * Avoid ring buffer recursion detection, as this event
         * is being performed within another event.
         */
-       buffer = trace_file->tr->trace_buffer.buffer;
+       buffer = trace_file->tr->array_buffer.buffer;
        ring_buffer_nest_start(buffer);
 
        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
@@ -905,15 +916,16 @@ static notrace void trace_event_raw_event_synth(void *__data,
                goto out;
 
        for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+               val_idx = var_ref_idx[i];
                if (event->fields[i]->is_string) {
-                       char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
+                       char *str_val = (char *)(long)var_ref_vals[val_idx];
                        char *str_field = (char *)&entry->fields[n_u64];
 
                        strscpy(str_field, str_val, STR_VAR_LEN_MAX);
                        n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
                } else {
                        struct synth_field *field = event->fields[i];
-                       u64 val = var_ref_vals[var_ref_idx + i];
+                       u64 val = var_ref_vals[val_idx];
 
                        switch (field->size) {
                        case 1:
@@ -1113,10 +1125,10 @@ static struct tracepoint *alloc_synth_tracepoint(char *name)
 }
 
 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
-                                   unsigned int var_ref_idx);
+                                   unsigned int *var_ref_idx);
 
 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
-                              unsigned int var_ref_idx)
+                              unsigned int *var_ref_idx)
 {
        struct tracepoint *tp = event->tp;
 
@@ -1293,6 +1305,273 @@ struct hist_var_data {
        struct hist_trigger_data *hist_data;
 };
 
+static int synth_event_check_arg_fn(void *data)
+{
+       struct dynevent_arg_pair *arg_pair = data;
+       int size;
+
+       size = synth_field_size((char *)arg_pair->lhs);
+
+       return size ? 0 : -EINVAL;
+}
+
+/**
+ * synth_event_add_field - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type: The type of the new field to add
+ * @name: The name of the new field to add
+ *
+ * Add a new field to a synthetic event cmd object.  Field ordering is in
+ * the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
+                         const char *name)
+{
+       struct dynevent_arg_pair arg_pair;
+       int ret;
+
+       if (cmd->type != DYNEVENT_TYPE_SYNTH)
+               return -EINVAL;
+
+       if (!type || !name)
+               return -EINVAL;
+
+       dynevent_arg_pair_init(&arg_pair, 0, ';');
+
+       arg_pair.lhs = type;
+       arg_pair.rhs = name;
+
+       ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
+       if (ret)
+               return ret;
+
+       if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+               ret = -EINVAL;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field);
+
+/**
+ * synth_event_add_field_str - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type_name: The type and name of the new field to add, as a single string
+ *
+ * Add a new field to a synthetic event cmd object, as a single
+ * string.  The @type_name string is expected to be of the form 'type
+ * name', which will be appended by ';'.  No sanity checking is done -
+ * what's passed in is assumed to already be well-formed.  Field
+ * ordering is in the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
+{
+       struct dynevent_arg arg;
+       int ret;
+
+       if (cmd->type != DYNEVENT_TYPE_SYNTH)
+               return -EINVAL;
+
+       if (!type_name)
+               return -EINVAL;
+
+       dynevent_arg_init(&arg, ';');
+
+       arg.str = type_name;
+
+       ret = dynevent_arg_add(cmd, &arg, NULL);
+       if (ret)
+               return ret;
+
+       if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+               ret = -EINVAL;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field_str);
+
+/**
+ * synth_event_add_fields - Add multiple fields to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Add a new set of fields to a synthetic event cmd object.  The event
+ * fields that will be defined for the event should be passed in as an
+ * array of struct synth_field_desc, and the number of elements in the
+ * array passed in as n_fields.  Field ordering will retain the
+ * ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_fields(struct dynevent_cmd *cmd,
+                          struct synth_field_desc *fields,
+                          unsigned int n_fields)
+{
+       unsigned int i;
+       int ret = 0;
+
+       for (i = 0; i < n_fields; i++) {
+               if (fields[i].type == NULL || fields[i].name == NULL) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_fields);
+
+/**
+ * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @mod: The module creating the event, NULL if not created from a module
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the synth_event_gen_cmd_start() wrapper, which
+ * automatically adds a NULL to the end of the arg list.  If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end().  This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * There should be an even number of variable args, each pair
+ * consisting of a type followed by a field name.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
+                               struct module *mod, ...)
+{
+       struct dynevent_arg arg;
+       va_list args;
+       int ret;
+
+       cmd->event_name = name;
+       cmd->private_data = mod;
+
+       if (cmd->type != DYNEVENT_TYPE_SYNTH)
+               return -EINVAL;
+
+       dynevent_arg_init(&arg, 0);
+       arg.str = name;
+       ret = dynevent_arg_add(cmd, &arg, NULL);
+       if (ret)
+               return ret;
+
+       va_start(args, mod);
+       for (;;) {
+               const char *type, *name;
+
+               type = va_arg(args, const char *);
+               if (!type)
+                       break;
+               name = va_arg(args, const char *);
+               if (!name)
+                       break;
+
+               if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               ret = synth_event_add_field(cmd, type, name);
+               if (ret)
+                       break;
+       }
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
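A minimal usage sketch for the command-building API above: initialize a dynevent_cmd, start the command with a couple of type/name pairs, then append further fields before executing it.  The helper name, event name and field names are illustrative only, and the synth_event_gen_cmd_start()/synth_event_gen_cmd_end() wrappers plus the usual <linux/slab.h> and <linux/trace_events.h> declarations are assumed to be available as described in the kernel-doc.

static int create_test_latency_event(void)
{
	struct synth_field_desc extra_fields[] = {
		{ .type = "pid_t",	.name = "next_pid"  },
		{ .type = "char[16]",	.name = "next_comm" },
	};
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Start the command with two type/name pairs ... */
	ret = synth_event_gen_cmd_start(&cmd, "test_latency", THIS_MODULE,
					"u64", "lat", "pid_t", "pid");
	if (ret)
		goto out;

	/* ... add one field as a single "type name" string ... */
	ret = synth_event_add_field_str(&cmd, "unsigned int cpu");
	if (ret)
		goto out;

	/* ... and the rest from an array of field descriptions. */
	ret = synth_event_add_fields(&cmd, extra_fields,
				     ARRAY_SIZE(extra_fields));
	if (ret)
		goto out;

	ret = synth_event_gen_cmd_end(&cmd);
 out:
	kfree(buf);
	return ret;
}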
+
+/**
+ * synth_event_gen_cmd_array_start - Start synthetic event command from an array
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @mod: The module creating the event, NULL if not created from a module
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end().  This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * The event fields that will be defined for the event should be
+ * passed in as an array of struct synth_field_desc, and the number of
+ * elements in the array passed in as n_fields.  Field ordering will
+ * retain the ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
+                                   struct module *mod,
+                                   struct synth_field_desc *fields,
+                                   unsigned int n_fields)
+{
+       struct dynevent_arg arg;
+       unsigned int i;
+       int ret = 0;
+
+       cmd->event_name = name;
+       cmd->private_data = mod;
+
+       if (cmd->type != DYNEVENT_TYPE_SYNTH)
+               return -EINVAL;
+
+       if (n_fields > SYNTH_FIELDS_MAX)
+               return -EINVAL;
+
+       dynevent_arg_init(&arg, 0);
+       arg.str = name;
+       ret = dynevent_arg_add(cmd, &arg, NULL);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < n_fields; i++) {
+               if (fields[i].type == NULL || fields[i].name == NULL)
+                       return -EINVAL;
+
+               ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
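A similar sketch for synth_event_gen_cmd_array_start(), generating only the first part of the command from an array of field descriptions and then appending one more field before executing it.  The names used ("block_sample", "sector", ...) and the helper itself are purely illustrative.

static int create_block_sample_event(void)
{
	struct synth_field_desc base_fields[] = {
		{ .type = "unsigned int",	.name = "dev_major" },
		{ .type = "unsigned int",	.name = "dev_minor" },
	};
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Generate the first part of the command from the array ... */
	ret = synth_event_gen_cmd_array_start(&cmd, "block_sample", THIS_MODULE,
					      base_fields,
					      ARRAY_SIZE(base_fields));
	if (ret)
		goto out;

	/* ... append one more field, then execute the command. */
	ret = synth_event_add_field(&cmd, "u64", "sector");
	if (ret)
		goto out;

	ret = synth_event_gen_cmd_end(&cmd);
 out:
	kfree(buf);
	return ret;
}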
+
 static int __create_synth_event(int argc, const char *name, const char **argv)
 {
        struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
@@ -1361,29 +1640,123 @@ static int __create_synth_event(int argc, const char *name, const char **argv)
        goto out;
 }
 
+/**
+ * synth_event_create - Create a new synthetic event
+ * @name: The name of the new synthetic event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ * @mod: The module creating the event, NULL if not created from a module
+ *
+ * Create a new synthetic event with the given name under the
+ * trace/events/synthetic/ directory.  The event fields that will be
+ * defined for the event should be passed in as an array of struct
+ * synth_field_desc, and the number of elements in the array passed in as
+ * n_fields. Field ordering will retain the ordering given in the
+ * fields array.
+ *
+ * If the new synthetic event is being created from a module, the mod
+ * param must be non-NULL.  This will ensure that the trace buffer
+ * won't contain unreadable events.
+ *
+ * The new synth event should be deleted using the synth_event_delete()
+ * function.  The new synthetic event can be generated from modules or
+ * other kernel code using synth_event_trace() and related functions.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_create(const char *name, struct synth_field_desc *fields,
+                      unsigned int n_fields, struct module *mod)
+{
+       struct dynevent_cmd cmd;
+       char *buf;
+       int ret;
+
+       buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+       ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
+                                             fields, n_fields);
+       if (ret)
+               goto out;
+
+       ret = synth_event_gen_cmd_end(&cmd);
+ out:
+       kfree(buf);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_create);
+
+static int destroy_synth_event(struct synth_event *se)
+{
+       int ret;
+
+       if (se->ref)
+               ret = -EBUSY;
+       else {
+               ret = unregister_synth_event(se);
+               if (!ret) {
+                       dyn_event_remove(&se->devent);
+                       free_synth_event(se);
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * synth_event_delete - Delete a synthetic event
+ * @event_name: The name of the synthetic event to delete
+ *
+ * Delete a synthetic event that was created with synth_event_create().
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_delete(const char *event_name)
+{
+       struct synth_event *se = NULL;
+       struct module *mod = NULL;
+       int ret = -ENOENT;
+
+       mutex_lock(&event_mutex);
+       se = find_synth_event(event_name);
+       if (se) {
+               mod = se->mod;
+               ret = destroy_synth_event(se);
+       }
+       mutex_unlock(&event_mutex);
+
+       if (mod) {
+               mutex_lock(&trace_types_lock);
+               /*
+                * It is safest to reset the ring buffer if the module
+                * being unloaded registered any events that were
+                * used. The only worry is if a new module gets
+                * loaded, and takes on the same id as the events of
+                * this module. When printing out the buffer, traced
+                * events left over from this module may be passed to
+                * the new module events and unexpected results may
+                * occur.
+                */
+               tracing_reset_all_online_cpus();
+               mutex_unlock(&trace_types_lock);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_delete);
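A minimal sketch of synth_event_create()/synth_event_delete() as documented above, for a hypothetical module-local event called "sched_delay"; the field list and function names are illustrative only.

static struct synth_field_desc sched_delay_fields[] = {
	{ .type = "pid_t",	.name = "pid"	},
	{ .type = "u64",	.name = "delay"	},
	{ .type = "char[16]",	.name = "comm"	},
};

static int __init sched_delay_event_init(void)
{
	/* Creates trace/events/synthetic/sched_delay for this module. */
	return synth_event_create("sched_delay", sched_delay_fields,
				  ARRAY_SIZE(sched_delay_fields), THIS_MODULE);
}

static void __exit sched_delay_event_exit(void)
{
	/* Fails with -EBUSY if the event is still referenced. */
	synth_event_delete("sched_delay");
}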
+
 static int create_or_delete_synth_event(int argc, char **argv)
 {
        const char *name = argv[0];
-       struct synth_event *event = NULL;
        int ret;
 
        /* trace_run_command() ensures argc != 0 */
        if (name[0] == '!') {
-               mutex_lock(&event_mutex);
-               event = find_synth_event(name + 1);
-               if (event) {
-                       if (event->ref)
-                               ret = -EBUSY;
-                       else {
-                               ret = unregister_synth_event(event);
-                               if (!ret) {
-                                       dyn_event_remove(&event->devent);
-                                       free_synth_event(event);
-                               }
-                       }
-               } else
-                       ret = -ENOENT;
-               mutex_unlock(&event_mutex);
+               ret = synth_event_delete(name + 1);
                return ret;
        }
 
@@ -1391,7 +1764,474 @@ static int create_or_delete_synth_event(int argc, char **argv)
        return ret == -ECANCELED ? -EINVAL : ret;
 }
 
-static int synth_event_create(int argc, const char **argv)
+static int synth_event_run_command(struct dynevent_cmd *cmd)
+{
+       struct synth_event *se;
+       int ret;
+
+       ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
+       if (ret)
+               return ret;
+
+       se = find_synth_event(cmd->event_name);
+       if (WARN_ON(!se))
+               return -ENOENT;
+
+       se->mod = cmd->private_data;
+
+       return ret;
+}
+
+/**
+ * synth_event_cmd_init - Initialize a synthetic event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a synthetic event command object.  Use this before
+ * calling any of the other dynevent_cmd functions.
+ */
+void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+       dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
+                         synth_event_run_command);
+}
+EXPORT_SYMBOL_GPL(synth_event_cmd_init);
+
+/**
+ * synth_event_trace - Trace a synthetic event
+ * @file: The trace_event_file representing the synthetic event
+ * @n_vals: The number of values in vals
+ * @args: Variable number of args containing the event values
+ *
+ * Trace a synthetic event using the values passed in the variable
+ * argument list.
+ *
+ * The argument list should be a list of 'n_vals' u64 values.  The
+ * number of vals must match the number of fields in the synthetic
+ * event, and must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64.  Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
+{
+       struct trace_event_buffer fbuffer;
+       struct synth_trace_event *entry;
+       struct trace_buffer *buffer;
+       struct synth_event *event;
+       unsigned int i, n_u64;
+       int fields_size = 0;
+       va_list args;
+       int ret = 0;
+
+       /*
+        * Normal event generation doesn't get called at all unless
+        * the ENABLED bit is set (which attaches the probe thus
+        * allowing this code to be called, etc).  Because this is
+        * called directly by the user, we don't have that but we
+        * still need to honor not logging when disabled.
+        */
+       if (!(file->flags & EVENT_FILE_FL_ENABLED))
+               return 0;
+
+       event = file->event_call->data;
+
+       if (n_vals != event->n_fields)
+               return -EINVAL;
+
+       if (trace_trigger_soft_disabled(file))
+               return -EINVAL;
+
+       fields_size = event->n_u64 * sizeof(u64);
+
+       /*
+        * Avoid ring buffer recursion detection, as this event
+        * is being performed within another event.
+        */
+       buffer = file->tr->array_buffer.buffer;
+       ring_buffer_nest_start(buffer);
+
+       entry = trace_event_buffer_reserve(&fbuffer, file,
+                                          sizeof(*entry) + fields_size);
+       if (!entry) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       va_start(args, n_vals);
+       for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+               u64 val;
+
+               val = va_arg(args, u64);
+
+               if (event->fields[i]->is_string) {
+                       char *str_val = (char *)(long)val;
+                       char *str_field = (char *)&entry->fields[n_u64];
+
+                       strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+                       n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+               } else {
+                       entry->fields[n_u64] = val;
+                       n_u64++;
+               }
+       }
+       va_end(args);
+
+       trace_event_buffer_commit(&fbuffer);
+out:
+       ring_buffer_nest_end(buffer);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace);
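A sketch of generating the hypothetical three-field "sched_delay" event from the earlier sketch with synth_event_trace(), assuming @file is a trace_event_file for that event obtained elsewhere; all values are passed in field order, each cast to u64.

static int trace_sched_delay(struct trace_event_file *file,
			     pid_t pid, u64 delay, const char *comm)
{
	/* 3 values, one per field, in the event's field order. */
	return synth_event_trace(file, 3,
				 (u64)pid,
				 delay,
				 (u64)(long)comm);	/* string as pointer */
}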
+
+/**
+ * synth_event_trace_array - Trace a synthetic event from an array
+ * @file: The trace_event_file representing the synthetic event
+ * @vals: Array of values
+ * @n_vals: The number of values in vals
+ *
+ * Trace a synthetic event using the values passed in as 'vals'.
+ *
+ * The 'vals' array is just an array of 'n_vals' u64 values.  The
+ * number of vals must match the number of fields in the synthetic
+ * event, and must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64.  Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+                           unsigned int n_vals)
+{
+       struct trace_event_buffer fbuffer;
+       struct synth_trace_event *entry;
+       struct trace_buffer *buffer;
+       struct synth_event *event;
+       unsigned int i, n_u64;
+       int fields_size = 0;
+       int ret = 0;
+
+       /*
+        * Normal event generation doesn't get called at all unless
+        * the ENABLED bit is set (which attaches the probe thus
+        * allowing this code to be called, etc).  Because this is
+        * called directly by the user, we don't have that but we
+        * still need to honor not logging when disabled.
+        */
+       if (!(file->flags & EVENT_FILE_FL_ENABLED))
+               return 0;
+
+       event = file->event_call->data;
+
+       if (n_vals != event->n_fields)
+               return -EINVAL;
+
+       if (trace_trigger_soft_disabled(file))
+               return -EINVAL;
+
+       fields_size = event->n_u64 * sizeof(u64);
+
+       /*
+        * Avoid ring buffer recursion detection, as this event
+        * is being performed within another event.
+        */
+       buffer = file->tr->array_buffer.buffer;
+       ring_buffer_nest_start(buffer);
+
+       entry = trace_event_buffer_reserve(&fbuffer, file,
+                                          sizeof(*entry) + fields_size);
+       if (!entry) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+               if (event->fields[i]->is_string) {
+                       char *str_val = (char *)(long)vals[i];
+                       char *str_field = (char *)&entry->fields[n_u64];
+
+                       strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+                       n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+               } else {
+                       entry->fields[n_u64] = vals[i];
+                       n_u64++;
+               }
+       }
+
+       trace_event_buffer_commit(&fbuffer);
+out:
+       ring_buffer_nest_end(buffer);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_array);
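The same hypothetical event generated via synth_event_trace_array(), which takes the values as a u64 array instead of a variable argument list.

static int trace_sched_delay_array(struct trace_event_file *file,
				   pid_t pid, u64 delay, const char *comm)
{
	u64 vals[3];

	vals[0] = (u64)pid;		/* must match the event's field order */
	vals[1] = delay;
	vals[2] = (u64)(long)comm;	/* strings are passed as pointers */

	return synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
}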
+
+/**
+ * synth_event_trace_start - Start piecewise synthetic event trace
+ * @file: The trace_event_file representing the synthetic event
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Start the trace of a synthetic event field-by-field rather than all
+ * at once.
+ *
+ * This function 'opens' an event trace, which means space is reserved
+ * for the event in the trace buffer, after which the event's
+ * individual field values can be set through either
+ * synth_event_add_next_val() or synth_event_add_val().
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state until the event trace is
+ * closed (and the event finally traced) using
+ * synth_event_trace_end().
+ *
+ * Note that synth_event_trace_end() must be called after all values
+ * have been added for each event trace, regardless of whether adding
+ * all field values succeeded or not.
+ *
+ * Note also that for a given event trace, all fields must be added
+ * using either synth_event_add_next_val() or synth_event_add_val()
+ * but not both together or interleaved.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_start(struct trace_event_file *file,
+                           struct synth_event_trace_state *trace_state)
+{
+       struct synth_trace_event *entry;
+       int fields_size = 0;
+       int ret = 0;
+
+       if (!trace_state) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       memset(trace_state, '\0', sizeof(*trace_state));
+
+       /*
+        * Normal event tracing doesn't get called at all unless the
+        * ENABLED bit is set (which attaches the probe thus allowing
+        * this code to be called, etc).  Because this is called
+        * directly by the user, we don't have that but we still need
+        * to honor not logging when disabled.  For the iterated
+        * trace case, we save the enabled state upon start and just
+        * ignore the following data calls.
+        */
+       if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
+               trace_state->enabled = false;
+               goto out;
+       }
+
+       trace_state->enabled = true;
+
+       trace_state->event = file->event_call->data;
+
+       if (trace_trigger_soft_disabled(file)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       fields_size = trace_state->event->n_u64 * sizeof(u64);
+
+       /*
+        * Avoid ring buffer recursion detection, as this event
+        * is being performed within another event.
+        */
+       trace_state->buffer = file->tr->array_buffer.buffer;
+       ring_buffer_nest_start(trace_state->buffer);
+
+       entry = trace_event_buffer_reserve(&trace_state->fbuffer, file,
+                                          sizeof(*entry) + fields_size);
+       if (!entry) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       trace_state->entry = entry;
+out:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_start);
+
+static int __synth_event_add_val(const char *field_name, u64 val,
+                                struct synth_event_trace_state *trace_state)
+{
+       struct synth_field *field = NULL;
+       struct synth_trace_event *entry;
+       struct synth_event *event;
+       int i, ret = 0;
+
+       if (!trace_state) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* can't mix add_next_synth_val() with add_synth_val() */
+       if (field_name) {
+               if (trace_state->add_next) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               trace_state->add_name = true;
+       } else {
+               if (trace_state->add_name) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               trace_state->add_next = true;
+       }
+
+       if (!trace_state->enabled)
+               goto out;
+
+       event = trace_state->event;
+       if (trace_state->add_name) {
+               for (i = 0; i < event->n_fields; i++) {
+                       field = event->fields[i];
+                       if (strcmp(field->name, field_name) == 0)
+                               break;
+               }
+               if (!field) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       } else {
+               if (trace_state->cur_field >= event->n_fields) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               field = event->fields[trace_state->cur_field++];
+       }
+
+       entry = trace_state->entry;
+       if (field->is_string) {
+               char *str_val = (char *)(long)val;
+               char *str_field;
+
+               if (!str_val) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               str_field = (char *)&entry->fields[field->offset];
+               strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+       } else
+               entry->fields[field->offset] = val;
+ out:
+       return ret;
+}
+
+/**
+ * synth_event_add_next_val - Add the next field's value to an open synth trace
+ * @val: The value to set the next field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the next field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64.  If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function assumes all the fields in an event are to be set one
+ * after another - successive calls to this function are made, one for
+ * each field, in the order of the fields in the event, until all
+ * fields have been set.  If you'd rather set each field individually
+ * without regard to ordering, synth_event_add_val() can be used
+ * instead.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_next_val(u64 val,
+                            struct synth_event_trace_state *trace_state)
+{
+       return __synth_event_add_val(NULL, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_next_val);
+
+/**
+ * synth_event_add_val - Add a named field's value to an open synth trace
+ * @field_name: The name of the synthetic event field value to set
+ * @val: The value to set the next field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the named field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64.  If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function looks up the field name, and if found, sets the field
+ * to the specified value.  This lookup makes this function more
+ * expensive than synth_event_add_next_val(), so use that or the
+ * non-piecewise synth_event_trace() instead if efficiency is more
+ * important.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_val(const char *field_name, u64 val,
+                       struct synth_event_trace_state *trace_state)
+{
+       return __synth_event_add_val(field_name, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_val);
+
+/**
+ * synth_event_trace_end - End piecewise synthetic event trace
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * End the trace of a synthetic event opened by
+ * synth_event_trace_start().
+ *
+ * This function 'closes' an event trace, which basically means that
+ * it commits the reserved event and cleans up other loose ends.
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state opened with
+ * synth_event_trace_start().
+ *
+ * Note that this function must be called after all values have been
+ * added for each event trace, regardless of whether adding all field
+ * values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_end(struct synth_event_trace_state *trace_state)
+{
+       if (!trace_state)
+               return -EINVAL;
+
+       trace_event_buffer_commit(&trace_state->fbuffer);
+
+       ring_buffer_nest_end(trace_state->buffer);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_end);
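A sketch of the piecewise variant for the same hypothetical event: open the trace, add the values one by one (here in field order via synth_event_add_next_val(); synth_event_add_val() could be used instead, but the two must not be mixed), and always close it with synth_event_trace_end().

static int trace_sched_delay_piecewise(struct trace_event_file *file,
				       pid_t pid, u64 delay, const char *comm)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	ret = synth_event_add_next_val((u64)pid, &state);
	if (ret)
		goto end;
	ret = synth_event_add_next_val(delay, &state);
	if (ret)
		goto end;
	ret = synth_event_add_next_val((u64)(long)comm, &state);
 end:
	/* Must be called even if adding a value failed. */
	synth_event_trace_end(&state);

	return ret;
}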
+
+static int create_synth_event(int argc, const char **argv)
 {
        const char *name = argv[0];
        int len;
@@ -2041,12 +2881,6 @@ static int parse_map_size(char *str)
        unsigned long size, map_bits;
        int ret;
 
-       strsep(&str, "=");
-       if (!str) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        ret = kstrtoul(str, 0, &size);
        if (ret)
                goto out;
@@ -2106,25 +2940,25 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
 static int parse_assignment(struct trace_array *tr,
                            char *str, struct hist_trigger_attrs *attrs)
 {
-       int ret = 0;
+       int len, ret = 0;
 
-       if ((str_has_prefix(str, "key=")) ||
-           (str_has_prefix(str, "keys="))) {
-               attrs->keys_str = kstrdup(str, GFP_KERNEL);
+       if ((len = str_has_prefix(str, "key=")) ||
+           (len = str_has_prefix(str, "keys="))) {
+               attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
                if (!attrs->keys_str) {
                        ret = -ENOMEM;
                        goto out;
                }
-       } else if ((str_has_prefix(str, "val=")) ||
-                  (str_has_prefix(str, "vals=")) ||
-                  (str_has_prefix(str, "values="))) {
-               attrs->vals_str = kstrdup(str, GFP_KERNEL);
+       } else if ((len = str_has_prefix(str, "val=")) ||
+                  (len = str_has_prefix(str, "vals=")) ||
+                  (len = str_has_prefix(str, "values="))) {
+               attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
                if (!attrs->vals_str) {
                        ret = -ENOMEM;
                        goto out;
                }
-       } else if (str_has_prefix(str, "sort=")) {
-               attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
+       } else if ((len = str_has_prefix(str, "sort="))) {
+               attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
                if (!attrs->sort_key_str) {
                        ret = -ENOMEM;
                        goto out;
@@ -2135,12 +2969,8 @@ static int parse_assignment(struct trace_array *tr,
                        ret = -ENOMEM;
                        goto out;
                }
-       } else if (str_has_prefix(str, "clock=")) {
-               strsep(&str, "=");
-               if (!str) {
-                       ret = -EINVAL;
-                       goto out;
-               }
+       } else if ((len = str_has_prefix(str, "clock="))) {
+               str += len;
 
                str = strstrip(str);
                attrs->clock = kstrdup(str, GFP_KERNEL);
@@ -2148,8 +2978,8 @@ static int parse_assignment(struct trace_array *tr,
                        ret = -ENOMEM;
                        goto out;
                }
-       } else if (str_has_prefix(str, "size=")) {
-               int map_bits = parse_map_size(str);
+       } else if ((len = str_has_prefix(str, "size="))) {
+               int map_bits = parse_map_size(str + len);
 
                if (map_bits < 0) {
                        ret = map_bits;
@@ -2189,8 +3019,15 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
 
        while (trigger_str) {
                char *str = strsep(&trigger_str, ":");
+               char *rhs;
 
-               if (strchr(str, '=')) {
+               rhs = strchr(str, '=');
+               if (rhs) {
+                       if (!strlen(++rhs)) {
+                               ret = -EINVAL;
+                               hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
+                               goto free;
+                       }
                        ret = parse_assignment(tr, str, attrs);
                        if (ret)
                                goto free;
@@ -2661,6 +3498,22 @@ static int init_var_ref(struct hist_field *ref_field,
        goto out;
 }
 
+static int find_var_ref_idx(struct hist_trigger_data *hist_data,
+                           struct hist_field *var_field)
+{
+       struct hist_field *ref_field;
+       int i;
+
+       for (i = 0; i < hist_data->n_var_refs; i++) {
+               ref_field = hist_data->var_refs[i];
+               if (ref_field->var.idx == var_field->var.idx &&
+                   ref_field->var.hist_data == var_field->hist_data)
+                       return i;
+       }
+
+       return -ENOENT;
+}
+
 /**
  * create_var_ref - Create a variable reference and attach it to trigger
  * @hist_data: The trigger that will be referencing the variable
@@ -4146,8 +4999,11 @@ static int check_synth_field(struct synth_event *event,
 
        field = event->fields[field_pos];
 
-       if (strcmp(field->type, hist_field->type) != 0)
-               return -EINVAL;
+       if (strcmp(field->type, hist_field->type) != 0) {
+               if (field->size != hist_field->size ||
+                   field->is_signed != hist_field->is_signed)
+                       return -EINVAL;
+       }
 
        return 0;
 }
@@ -4234,11 +5090,11 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
        struct trace_array *tr = hist_data->event_file->tr;
        char *event_name, *param, *system = NULL;
        struct hist_field *hist_field, *var_ref;
-       unsigned int i, var_ref_idx;
+       unsigned int i;
        unsigned int field_pos = 0;
        struct synth_event *event;
        char *synth_event_name;
-       int ret = 0;
+       int var_ref_idx, ret = 0;
 
        lockdep_assert_held(&event_mutex);
 
@@ -4255,8 +5111,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
 
        event->ref++;
 
-       var_ref_idx = hist_data->n_var_refs;
-
        for (i = 0; i < data->n_params; i++) {
                char *p;
 
@@ -4305,6 +5159,14 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
                                goto err;
                        }
 
+                       var_ref_idx = find_var_ref_idx(hist_data, var_ref);
+                       if (WARN_ON(var_ref_idx < 0)) {
+                               ret = var_ref_idx;
+                               goto err;
+                       }
+
+                       data->var_ref_idx[i] = var_ref_idx;
+
                        field_pos++;
                        kfree(p);
                        continue;
@@ -4323,7 +5185,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
        }
 
        data->synth_event = event;
-       data->var_ref_idx = var_ref_idx;
  out:
        return ret;
  err:
@@ -4542,10 +5403,6 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
        if (!fields_str)
                goto out;
 
-       strsep(&fields_str, "=");
-       if (!fields_str)
-               goto out;
-
        for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
                     j < TRACING_MAP_VALS_MAX; i++) {
                field_str = strsep(&fields_str, ",");
@@ -4640,10 +5497,6 @@ static int create_key_fields(struct hist_trigger_data *hist_data,
        if (!fields_str)
                goto out;
 
-       strsep(&fields_str, "=");
-       if (!fields_str)
-               goto out;
-
        for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
                field_str = strsep(&fields_str, ",");
                if (!field_str)
@@ -4775,7 +5628,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data,
        return ret;
 }
 
-static int is_descending(const char *str)
+static int is_descending(struct trace_array *tr, const char *str)
 {
        if (!str)
                return 0;
@@ -4786,11 +5639,14 @@ static int is_descending(const char *str)
        if (strcmp(str, "ascending") == 0)
                return 0;
 
+       hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
+
        return -EINVAL;
 }
 
 static int create_sort_keys(struct hist_trigger_data *hist_data)
 {
+       struct trace_array *tr = hist_data->event_file->tr;
        char *fields_str = hist_data->attrs->sort_key_str;
        struct tracing_map_sort_key *sort_key;
        int descending, ret = 0;
@@ -4801,12 +5657,6 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
        if (!fields_str)
                goto out;
 
-       strsep(&fields_str, "=");
-       if (!fields_str) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
                struct hist_field *hist_field;
                char *field_str, *field_name;
@@ -4815,25 +5665,30 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
                sort_key = &hist_data->sort_keys[i];
 
                field_str = strsep(&fields_str, ",");
-               if (!field_str) {
-                       if (i == 0)
-                               ret = -EINVAL;
+               if (!field_str)
+                       break;
+
+               if (!*field_str) {
+                       ret = -EINVAL;
+                       hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
                        break;
                }
 
                if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
+                       hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
                        ret = -EINVAL;
                        break;
                }
 
                field_name = strsep(&field_str, ".");
-               if (!field_name) {
+               if (!field_name || !*field_name) {
                        ret = -EINVAL;
+                       hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
                        break;
                }
 
                if (strcmp(field_name, "hitcount") == 0) {
-                       descending = is_descending(field_str);
+                       descending = is_descending(tr, field_str);
                        if (descending < 0) {
                                ret = descending;
                                break;
@@ -4855,7 +5710,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
 
                        if (strcmp(field_name, test_name) == 0) {
                                sort_key->field_idx = idx;
-                               descending = is_descending(field_str);
+                               descending = is_descending(tr, field_str);
                                if (descending < 0) {
                                        ret = descending;
                                        goto out;
@@ -4866,6 +5721,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
                }
                if (j == hist_data->n_fields) {
                        ret = -EINVAL;
+                       hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
                        break;
                }
        }
index 40106ff..dd34a1b 100644 (file)
@@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 {
        struct trace_event_file *event_file = event_file_data(m->private);
 
-       if (t == SHOW_AVAILABLE_TRIGGERS)
+       if (t == SHOW_AVAILABLE_TRIGGERS) {
+               (*pos)++;
                return NULL;
-
+       }
        return seq_list_next(t, &event_file->triggers, pos);
 }
 
@@ -213,7 +214,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
        return ret;
 }
 
-static int trigger_process_regex(struct trace_event_file *file, char *buff)
+int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
        char *command, *next = buff;
        struct event_command *p;
index b611cd3..8a4c8d5 100644 (file)
@@ -101,7 +101,7 @@ static int function_trace_init(struct trace_array *tr)
 
        ftrace_init_array_ops(tr, func);
 
-       tr->trace_buffer.cpu = get_cpu();
+       tr->array_buffer.cpu = get_cpu();
        put_cpu();
 
        tracing_start_cmdline_record();
@@ -118,7 +118,7 @@ static void function_trace_reset(struct trace_array *tr)
 
 static void function_trace_start(struct trace_array *tr)
 {
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
 }
 
 static void
@@ -143,7 +143,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
                goto out;
 
        cpu = smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
@@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1)) {
index 78af971..7d71546 100644 (file)
@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
 {
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;
 
        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
 {
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;
 
        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
@@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter,
                         * We need to consume the current entry to see
                         * the next one.
                         */
-                       ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
+                       ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
                                            NULL, NULL);
-                       event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
+                       event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }
 
@@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
 {
        unsigned long long usecs;
 
-       usecs = iter->ts - iter->trace_buffer->time_start;
+       usecs = iter->ts - iter->array_buffer->time_start;
        do_div(usecs, NSEC_PER_USEC);
 
        trace_seq_printf(s, "%9llu us |  ", usecs);
index 6638d63..a48808c 100644 (file)
@@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
 {
        struct trace_array *tr = hwlat_trace;
        struct trace_event_call *call = &event_hwlat;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ring_buffer_event *event;
        struct hwlat_entry *entry;
        unsigned long flags;
@@ -556,7 +556,7 @@ static int init_tracefs(void)
        return 0;
 
  err:
-       tracefs_remove_recursive(top_dir);
+       tracefs_remove(top_dir);
        return -ENOMEM;
 }
 
index a745b0c..10bbb0f 100644 (file)
@@ -122,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;
 
-       *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       *data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);
 
        if (likely(disabled == 1))
@@ -167,7 +167,7 @@ static int irqsoff_display_graph(struct trace_array *tr, int set)
                per_cpu(tracing_cpu, cpu) = 0;
 
        tr->max_latency = 0;
-       tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
+       tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
 
        return start_irqsoff_tracer(irqsoff_trace, set);
 }
@@ -382,7 +382,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
        if (per_cpu(tracing_cpu, cpu))
                return;
 
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
        if (unlikely(!data) || atomic_read(&data->disabled))
                return;
@@ -420,7 +420,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
        if (!tracer_enabled || !tracing_is_enabled())
                return;
 
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
index cca6504..9da7610 100644 (file)
@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
        if (cpu_file == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        iter.buffer_iter[cpu] =
-                       ring_buffer_read_prepare(iter.trace_buffer->buffer,
+                       ring_buffer_read_prepare(iter.array_buffer->buffer,
                                                 cpu, GFP_ATOMIC);
                        ring_buffer_read_start(iter.buffer_iter[cpu]);
                        tracing_iter_reset(&iter, cpu);
@@ -51,7 +51,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
        } else {
                iter.cpu_file = cpu_file;
                iter.buffer_iter[cpu_file] =
-                       ring_buffer_read_prepare(iter.trace_buffer->buffer,
+                       ring_buffer_read_prepare(iter.array_buffer->buffer,
                                                 cpu_file, GFP_ATOMIC);
                ring_buffer_read_start(iter.buffer_iter[cpu_file]);
                tracing_iter_reset(&iter, cpu_file);
@@ -124,7 +124,7 @@ static int kdb_ftdump(int argc, const char **argv)
        iter.buffer_iter = buffer_iter;
 
        for_each_tracing_cpu(cpu) {
-               atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+               atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
        }
 
        /* A negative skip_entries means skip all but the last entries */
@@ -139,7 +139,7 @@ static int kdb_ftdump(int argc, const char **argv)
        ftrace_dump_buf(skip_entries, cpu_file);
 
        for_each_tracing_cpu(cpu) {
-               atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+               atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
        }
 
        kdb_trap_printk--;
index aa515d5..d8264eb 100644 (file)
@@ -22,7 +22,6 @@
 
 #define KPROBE_EVENT_SYSTEM "kprobes"
 #define KRETPROBE_MAXACTIVE_MAX 4096
-#define MAX_KPROBE_CMDLINE_SIZE 1024
 
 /* Kprobe early definition from command line */
 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
@@ -902,6 +901,167 @@ static int create_or_delete_trace_kprobe(int argc, char **argv)
        return ret == -ECANCELED ? -EINVAL : ret;
 }
 
+static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
+{
+       return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
+}
+
+/**
+ * kprobe_event_cmd_init - Initialize a kprobe event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a kprobe event command object.  Use this before
+ * calling any of the other kprobe_event functions.
+ */
+void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+       dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
+                         trace_kprobe_run_command);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+
+/**
+ * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @kretprobe: Is this a return probe?
+ * @name: The name of the kprobe event
+ * @loc: The location of the kprobe event
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
+ * adds a NULL to the end of the arg list.  If this function is used
+ * directly, make sure the last arg in the variable arg list is NULL.
+ *
+ * Generate a kprobe event command to be executed by
+ * kprobe_event_gen_cmd_end().  This function can be used to generate the
+ * complete command or only the first part of it; in the latter case,
+ * kprobe_event_add_fields() can be used to add more fields following this.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
+                                const char *name, const char *loc, ...)
+{
+       char buf[MAX_EVENT_NAME_LEN];
+       struct dynevent_arg arg;
+       va_list args;
+       int ret;
+
+       if (cmd->type != DYNEVENT_TYPE_KPROBE)
+               return -EINVAL;
+
+       if (kretprobe)
+               snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
+       else
+               snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
+
+       ret = dynevent_str_add(cmd, buf);
+       if (ret)
+               return ret;
+
+       dynevent_arg_init(&arg, 0);
+       arg.str = loc;
+       ret = dynevent_arg_add(cmd, &arg, NULL);
+       if (ret)
+               return ret;
+
+       va_start(args, loc);
+       for (;;) {
+               const char *field;
+
+               field = va_arg(args, const char *);
+               if (!field)
+                       break;
+
+               if (++cmd->n_fields > MAX_TRACE_ARGS) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               arg.str = field;
+               ret = dynevent_arg_add(cmd, &arg, NULL);
+               if (ret)
+                       break;
+       }
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
+
+/**
+ * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_add_fields() wrapper, which
+ * automatically adds a NULL to the end of the arg list.  If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Add probe fields to an existing kprobe command using a variable
+ * list of args.  Fields are added in the same order they're listed.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
+{
+       struct dynevent_arg arg;
+       va_list args;
+       int ret;
+
+       if (cmd->type != DYNEVENT_TYPE_KPROBE)
+               return -EINVAL;
+
+       dynevent_arg_init(&arg, 0);
+
+       va_start(args, cmd);
+       for (;;) {
+               const char *field;
+
+               field = va_arg(args, const char *);
+               if (!field)
+                       break;
+
+               if (++cmd->n_fields > MAX_TRACE_ARGS) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               arg.str = field;
+               ret = dynevent_arg_add(cmd, &arg, NULL);
+               if (ret)
+                       break;
+       }
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
+
+/**
+ * kprobe_event_delete - Delete a kprobe event
+ * @name: The name of the kprobe event to delete
+ *
+ * Delete a kprobe event with the given @name from kernel code rather
+ * than directly from the command line.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int kprobe_event_delete(const char *name)
+{
+       char buf[MAX_EVENT_NAME_LEN];
+
+       snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
+
+       return trace_run_command(buf, create_or_delete_trace_kprobe);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_delete);
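A minimal sketch of the kprobe counterpart: build a probe on do_sys_open with kprobe_event_gen_cmd_start(), append more fetch args with kprobe_event_add_fields(), execute the command with kprobe_event_gen_cmd_end(), and remove the probe again with kprobe_event_delete().  The wrappers are assumed to behave as the kernel-doc above describes, and the x86-style fetch args, event name and helper are illustrative only.

static int run_kprobe_event_example(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Roughly: p:kprobes/open_probe do_sys_open dfd=%ax filename=%dx */
	ret = kprobe_event_gen_cmd_start(&cmd, "open_probe", "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (ret)
		goto out;

	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
	if (ret)
		goto out;

	ret = kprobe_event_gen_cmd_end(&cmd);
	if (ret)
		goto out;

	/* Tear the probe down again when it's no longer needed. */
	ret = kprobe_event_delete("open_probe");
 out:
	kfree(buf);
	return ret;
}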
+
 static int trace_kprobe_release(struct dyn_event *ev)
 {
        struct trace_kprobe *tk = to_trace_kprobe(ev);
@@ -1175,35 +1335,35 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
 {
        struct kprobe_trace_entry_head *entry;
-       struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
-       int size, dsize, pc;
-       unsigned long irq_flags;
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+       struct trace_event_buffer fbuffer;
+       int dsize;
 
        WARN_ON(call != trace_file->event_call);
 
        if (trace_trigger_soft_disabled(trace_file))
                return;
 
-       local_save_flags(irq_flags);
-       pc = preempt_count();
+       local_save_flags(fbuffer.flags);
+       fbuffer.pc = preempt_count();
+       fbuffer.trace_file = trace_file;
 
        dsize = __get_data_size(&tk->tp, regs);
-       size = sizeof(*entry) + tk->tp.size + dsize;
 
-       event = trace_event_buffer_lock_reserve(&buffer, trace_file,
-                                               call->event.type,
-                                               size, irq_flags, pc);
-       if (!event)
+       fbuffer.event =
+               trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+                                       call->event.type,
+                                       sizeof(*entry) + tk->tp.size + dsize,
+                                       fbuffer.flags, fbuffer.pc);
+       if (!fbuffer.event)
                return;
 
-       entry = ring_buffer_event_data(event);
+       fbuffer.regs = regs;
+       entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
        entry->ip = (unsigned long)tk->rp.kp.addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
 
-       event_trigger_unlock_commit_regs(trace_file, buffer, event,
-                                        entry, irq_flags, pc, regs);
+       trace_event_buffer_commit(&fbuffer);
 }
 
 static void
@@ -1223,36 +1383,35 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct trace_event_file *trace_file)
 {
        struct kretprobe_trace_entry_head *entry;
-       struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
-       int size, pc, dsize;
-       unsigned long irq_flags;
+       struct trace_event_buffer fbuffer;
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+       int dsize;
 
        WARN_ON(call != trace_file->event_call);
 
        if (trace_trigger_soft_disabled(trace_file))
                return;
 
-       local_save_flags(irq_flags);
-       pc = preempt_count();
+       local_save_flags(fbuffer.flags);
+       fbuffer.pc = preempt_count();
+       fbuffer.trace_file = trace_file;
 
        dsize = __get_data_size(&tk->tp, regs);
-       size = sizeof(*entry) + tk->tp.size + dsize;
-
-       event = trace_event_buffer_lock_reserve(&buffer, trace_file,
-                                               call->event.type,
-                                               size, irq_flags, pc);
-       if (!event)
+       fbuffer.event =
+               trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+                                       call->event.type,
+                                       sizeof(*entry) + tk->tp.size + dsize,
+                                       fbuffer.flags, fbuffer.pc);
+       if (!fbuffer.event)
                return;
 
-       entry = ring_buffer_event_data(event);
+       fbuffer.regs = regs;
+       entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
 
-       event_trigger_unlock_commit_regs(trace_file, buffer, event,
-                                        entry, irq_flags, pc, regs);
+       trace_event_buffer_commit(&fbuffer);
 }
 
 static void
@@ -1698,11 +1857,12 @@ static __init void setup_boot_kprobe_events(void)
        enable_boot_kprobe_events();
 }
 
-/* Make a tracefs interface for controlling probe points */
-static __init int init_kprobe_trace(void)
+/*
+ * Register dynevent at subsys_initcall. This allows kernel to setup kprobe
+ * events in fs_initcall without tracefs.
+ */
+static __init int init_kprobe_trace_early(void)
 {
-       struct dentry *d_tracer;
-       struct dentry *entry;
        int ret;
 
        ret = dyn_event_register(&trace_kprobe_ops);
@@ -1712,6 +1872,16 @@ static __init int init_kprobe_trace(void)
        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;
 
+       return 0;
+}
+subsys_initcall(init_kprobe_trace_early);
+
+/* Make a tracefs interface for controlling probe points */
+static __init int init_kprobe_trace(void)
+{
+       struct dentry *d_tracer;
+       struct dentry *entry;
+
        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;
index b038801..84582bf 100644 (file)
@@ -32,7 +32,7 @@ static void mmio_reset_data(struct trace_array *tr)
        overrun_detected = false;
        prev_overruns = 0;
 
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
@@ -122,7 +122,7 @@ static void mmio_close(struct trace_iterator *iter)
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
        unsigned long cnt = atomic_xchg(&dropped_count, 0);
-       unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+       unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
 
        if (over > prev_overruns)
                cnt += over - prev_overruns;
@@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
                                struct mmiotrace_rw *rw)
 {
        struct trace_event_call *call = &event_mmiotrace_rw;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
        int pc = preempt_count();
@@ -318,7 +318,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
        struct trace_array *tr = mmio_trace_array;
-       struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+       struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
        __trace_mmiotrace_rw(tr, data, rw);
 }
 
@@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
                                struct mmiotrace_map *map)
 {
        struct trace_event_call *call = &event_mmiotrace_map;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_map *entry;
        int pc = preempt_count();
@@ -351,7 +351,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
        struct trace_array_cpu *data;
 
        preempt_disable();
-       data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+       data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
        __trace_mmiotrace_map(tr, data, map);
        preempt_enable();
 }
index d9b4b7c..b490908 100644 (file)
@@ -538,7 +538,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
        struct trace_array *tr = iter->tr;
        unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
        unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
-       unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
+       unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
        unsigned long long rel_ts = next_ts - iter->ts;
        struct trace_seq *s = &iter->seq;
 
index e288168..e304196 100644 (file)
@@ -89,8 +89,10 @@ static void tracing_sched_unregister(void)
 
 static void tracing_start_sched_switch(int ops)
 {
-       bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
+       bool sched_register;
+
        mutex_lock(&sched_register_mutex);
+       sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
 
        switch (ops) {
        case RECORD_CMDLINE:
index 617e297..97b10bb 100644 (file)
@@ -82,7 +82,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
        if (cpu != wakeup_current_cpu)
                goto out_enable;
 
-       *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       *data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);
        if (unlikely(disabled != 1))
                goto out;
@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
                           unsigned long flags, int pc)
 {
        struct trace_event_call *call = &event_context_switch;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
 
@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        struct trace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
+       struct trace_buffer *buffer = tr->array_buffer.buffer;
 
        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
@@ -459,7 +459,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
 
        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
-       disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+       disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
        if (likely(disabled != 1))
                goto out;
 
@@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
                goto out_unlock;
 
        /* The task we are waiting for is waking up */
-       data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+       data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 
        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -494,7 +494,7 @@ out_unlock:
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
 out:
-       atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+       atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -513,7 +513,7 @@ static void wakeup_reset(struct trace_array *tr)
 {
        unsigned long flags;
 
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
 
        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);
@@ -551,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
                return;
 
        pc = preempt_count();
-       disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+       disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
        if (unlikely(disabled != 1))
                goto out;
 
@@ -583,7 +583,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
 
        local_save_flags(flags);
 
-       data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+       data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
        __trace_stack(wakeup_trace, flags, 0, pc);
@@ -598,7 +598,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
 out_locked:
        arch_spin_unlock(&wakeup_lock);
 out:
-       atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+       atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
index 69ee8ef..b5e3496 100644 (file)
@@ -23,7 +23,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
        return 0;
 }
 
-static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
+static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
 {
        struct ring_buffer_event *event;
        struct trace_entry *entry;
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
 {
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;
@@ -362,7 +362,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        msleep(100);
 
        /* we should have nothing in the buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
        if (ret)
                goto out;
 
@@ -383,7 +383,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        ftrace_enabled = 0;
 
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
 
        ftrace_enabled = 1;
        tracing_start();
@@ -682,7 +682,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
        ftrace_enabled = 0;
 
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
 
        ftrace_enabled = 1;
        trace->reset(tr);
@@ -768,7 +768,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
@@ -790,7 +790,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        tracing_stop();
 
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
 
        /* Need to also simulate the tr->reset to remove this fgraph_ops */
        tracing_stop_cmdline_record();
@@ -848,7 +848,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
@@ -910,7 +910,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
@@ -976,7 +976,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;
 
@@ -1006,7 +1006,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;
 
@@ -1136,7 +1136,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
 
@@ -1177,7 +1177,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
        trace->reset(tr);
        tracing_start();
 
index 87de6ed..1d84fcc 100644 (file)
@@ -30,9 +30,6 @@
 /* How much buffer is left on the trace_seq? */
 #define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq)
 
-/* How much buffer is written? */
-#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq)
-
 /*
  * trace_seq should work with being initialized with 0s.
  */
index 874f127..d1fa197 100644 (file)
@@ -280,18 +280,22 @@ static int tracing_stat_init(void)
 
        d_tracing = tracing_init_dentry();
        if (IS_ERR(d_tracing))
-               return 0;
+               return -ENODEV;
 
        stat_dir = tracefs_create_dir("trace_stat", d_tracing);
-       if (!stat_dir)
+       if (!stat_dir) {
                pr_warn("Could not create tracefs 'trace_stat' entry\n");
+               return -ENOMEM;
+       }
        return 0;
 }
 
 static int init_stat_file(struct stat_session *session)
 {
-       if (!stat_dir && tracing_stat_init())
-               return -ENODEV;
+       int ret;
+
+       if (!stat_dir && (ret = tracing_stat_init()))
+               return ret;
 
        session->file = tracefs_create_file(session->ts->name, 0644,
                                            stat_dir,
@@ -304,7 +308,7 @@ static int init_stat_file(struct stat_session *session)
 int register_stat_tracer(struct tracer_stat *trace)
 {
        struct stat_session *session, *node;
-       int ret;
+       int ret = -EINVAL;
 
        if (!trace)
                return -EINVAL;
@@ -315,17 +319,15 @@ int register_stat_tracer(struct tracer_stat *trace)
        /* Already registered? */
        mutex_lock(&all_stat_sessions_mutex);
        list_for_each_entry(node, &all_stat_sessions, session_list) {
-               if (node->ts == trace) {
-                       mutex_unlock(&all_stat_sessions_mutex);
-                       return -EINVAL;
-               }
+               if (node->ts == trace)
+                       goto out;
        }
-       mutex_unlock(&all_stat_sessions_mutex);
 
+       ret = -ENOMEM;
        /* Init the session */
        session = kzalloc(sizeof(*session), GFP_KERNEL);
        if (!session)
-               return -ENOMEM;
+               goto out;
 
        session->ts = trace;
        INIT_LIST_HEAD(&session->session_list);
@@ -334,15 +336,16 @@ int register_stat_tracer(struct tracer_stat *trace)
        ret = init_stat_file(session);
        if (ret) {
                destroy_session(session);
-               return ret;
+               goto out;
        }
 
+       ret = 0;
        /* Register */
-       mutex_lock(&all_stat_sessions_mutex);
        list_add_tail(&session->session_list, &all_stat_sessions);
+ out:
        mutex_unlock(&all_stat_sessions_mutex);
 
-       return 0;
+       return ret;
 }
 
 void unregister_stat_tracer(struct tracer_stat *trace)
index 2978c29..d85a2f0 100644 (file)
@@ -297,7 +297,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        unsigned long irq_flags;
        unsigned long args[6];
        int pc;
@@ -325,7 +325,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        local_save_flags(irq_flags);
        pc = preempt_count();
 
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer,
                        sys_data->enter_event->event.type, size, irq_flags, pc);
        if (!event)
@@ -347,7 +347,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
+       struct trace_buffer *buffer;
        unsigned long irq_flags;
        int pc;
        int syscall_nr;
@@ -371,7 +371,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        local_save_flags(irq_flags);
        pc = preempt_count();
 
-       buffer = tr->trace_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer,
                        sys_data->exit_event->event.type, sizeof(*entry),
                        irq_flags, pc);
index 7885ebd..18d16f3 100644 (file)
@@ -931,8 +931,8 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
                                struct trace_event_file *trace_file)
 {
        struct uprobe_trace_entry_head *entry;
+       struct trace_buffer *buffer;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
        void *data;
        int size, esize;
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);
index bc7e563..0cf875f 100644 (file)
@@ -573,6 +573,9 @@ config DIMLIB
 config LIBFDT
        bool
 
+config LIBXBC
+       bool
+
 config OID_REGISTRY
        tristate
        help
index 23ca78d..8bb9117 100644 (file)
@@ -230,6 +230,8 @@ $(foreach file, $(libfdt_files), \
        $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
 
+lib-$(CONFIG_LIBXBC) += bootconfig.o
+
 obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
 obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
 
index 6e175fb..89260aa 100644 (file)
@@ -431,97 +431,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
  * second version by Paul Jackson, third by Joe Korty.
  */
 
-#define CHUNKSZ                                32
-#define nbits_to_hold_value(val)       fls(val)
-#define BASEDEC 10             /* fancier cpuset lists input in decimal */
-
-/**
- * __bitmap_parse - convert an ASCII hex string into a bitmap.
- * @buf: pointer to buffer containing string.
- * @buflen: buffer size in bytes.  If string is smaller than this
- *    then it must be terminated with a \0.
- * @is_user: location of buffer, 0 indicates kernel space
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Commas group hex digits into chunks.  Each chunk defines exactly 32
- * bits of the resultant bitmask.  No chunk may specify a value larger
- * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
- * then leading 0-bits are prepended.  %-EINVAL is returned for illegal
- * characters and for grouping errors such as "1,,5", ",44", "," and "".
- * Leading and trailing whitespace accepted, but not embedded whitespace.
- */
-int __bitmap_parse(const char *buf, unsigned int buflen,
-               int is_user, unsigned long *maskp,
-               int nmaskbits)
-{
-       int c, old_c, totaldigits, ndigits, nchunks, nbits;
-       u32 chunk;
-       const char __user __force *ubuf = (const char __user __force *)buf;
-
-       bitmap_zero(maskp, nmaskbits);
-
-       nchunks = nbits = totaldigits = c = 0;
-       do {
-               chunk = 0;
-               ndigits = totaldigits;
-
-               /* Get the next chunk of the bitmap */
-               while (buflen) {
-                       old_c = c;
-                       if (is_user) {
-                               if (__get_user(c, ubuf++))
-                                       return -EFAULT;
-                       }
-                       else
-                               c = *buf++;
-                       buflen--;
-                       if (isspace(c))
-                               continue;
-
-                       /*
-                        * If the last character was a space and the current
-                        * character isn't '\0', we've got embedded whitespace.
-                        * This is a no-no, so throw an error.
-                        */
-                       if (totaldigits && c && isspace(old_c))
-                               return -EINVAL;
-
-                       /* A '\0' or a ',' signal the end of the chunk */
-                       if (c == '\0' || c == ',')
-                               break;
-
-                       if (!isxdigit(c))
-                               return -EINVAL;
-
-                       /*
-                        * Make sure there are at least 4 free bits in 'chunk'.
-                        * If not, this hexdigit will overflow 'chunk', so
-                        * throw an error.
-                        */
-                       if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
-                               return -EOVERFLOW;
-
-                       chunk = (chunk << 4) | hex_to_bin(c);
-                       totaldigits++;
-               }
-               if (ndigits == totaldigits)
-                       return -EINVAL;
-               if (nchunks == 0 && chunk == 0)
-                       continue;
-
-               __bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
-               *maskp |= chunk;
-               nchunks++;
-               nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
-               if (nbits > nmaskbits)
-                       return -EOVERFLOW;
-       } while (buflen && c == ',');
-
-       return 0;
-}
-EXPORT_SYMBOL(__bitmap_parse);
-
 /**
  * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
  *
@@ -530,22 +439,22 @@ EXPORT_SYMBOL(__bitmap_parse);
  *    then it must be terminated with a \0.
  * @maskp: pointer to bitmap array that will contain result.
  * @nmaskbits: size of bitmap, in bits.
- *
- * Wrapper for __bitmap_parse(), providing it with user buffer.
- *
- * We cannot have this as an inline function in bitmap.h because it needs
- * linux/uaccess.h to get the access_ok() declaration and this causes
- * cyclic dependencies.
  */
 int bitmap_parse_user(const char __user *ubuf,
                        unsigned int ulen, unsigned long *maskp,
                        int nmaskbits)
 {
-       if (!access_ok(ubuf, ulen))
-               return -EFAULT;
-       return __bitmap_parse((const char __force *)ubuf,
-                               ulen, 1, maskp, nmaskbits);
+       char *buf;
+       int ret;
 
+       buf = memdup_user_nul(ubuf, ulen);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
+
+       ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
+
+       kfree(buf);
+       return ret;
 }
 EXPORT_SYMBOL(bitmap_parse_user);
 
@@ -653,6 +562,14 @@ static const char *bitmap_find_region(const char *str)
        return end_of_str(*str) ? NULL : str;
 }
 
+static const char *bitmap_find_region_reverse(const char *start, const char *end)
+{
+       while (start <= end && __end_of_region(*end))
+               end--;
+
+       return end;
+}
+
 static const char *bitmap_parse_region(const char *str, struct region *r)
 {
        str = bitmap_getnum(str, &r->start);
@@ -776,6 +693,80 @@ int bitmap_parselist_user(const char __user *ubuf,
 }
 EXPORT_SYMBOL(bitmap_parselist_user);
 
+static const char *bitmap_get_x32_reverse(const char *start,
+                                       const char *end, u32 *num)
+{
+       u32 ret = 0;
+       int c, i;
+
+       for (i = 0; i < 32; i += 4) {
+               c = hex_to_bin(*end--);
+               if (c < 0)
+                       return ERR_PTR(-EINVAL);
+
+               ret |= c << i;
+
+               if (start > end || __end_of_region(*end))
+                       goto out;
+       }
+
+       if (hex_to_bin(*end--) >= 0)
+               return ERR_PTR(-EOVERFLOW);
+out:
+       *num = ret;
+       return end;
+}
+
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @start: pointer to buffer containing string.
+ * @buflen: buffer size in bytes.  If string is smaller than this
+ *    then it must be terminated with a \0 or \n. In that case,
+ *    UINT_MAX may be provided instead of string length.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks.  Each chunk defines exactly 32
+ * bits of the resultant bitmask.  No chunk may specify a value larger
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended.  %-EINVAL is returned for illegal
+ * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
+ * Leading, embedded and trailing whitespace accepted.
+ */
+int bitmap_parse(const char *start, unsigned int buflen,
+               unsigned long *maskp, int nmaskbits)
+{
+       const char *end = strnchrnul(start, buflen, '\n') - 1;
+       int chunks = BITS_TO_U32(nmaskbits);
+       u32 *bitmap = (u32 *)maskp;
+       int unset_bit;
+
+       while (1) {
+               end = bitmap_find_region_reverse(start, end);
+               if (start > end)
+                       break;
+
+               if (!chunks--)
+                       return -EOVERFLOW;
+
+               end = bitmap_get_x32_reverse(start, end, bitmap++);
+               if (IS_ERR(end))
+                       return PTR_ERR(end);
+       }
+
+       unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
+       if (unset_bit < nmaskbits) {
+               bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
+               return 0;
+       }
+
+       if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
+               return -EOVERFLOW;
+
+       return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
+
 
 #ifdef CONFIG_NUMA
 /**
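
A short usage sketch of the rewritten parser; the 33-bit case mirrors the "1,0" vector added to lib/test_bitmap.c below, and the calling context is assumed to be ordinary kernel code:

	DECLARE_BITMAP(mask, 33);
	int err;

	/* The last chunk is least significant: "1,0" sets bit 32, clears bits 0-31. */
	err = bitmap_parse("1,0", UINT_MAX, mask, 33);
	if (err)	/* -EINVAL on bad digits, -EOVERFLOW if the value does not fit */
		return err;

bitmap_parse_user() now simply copies the user buffer with memdup_user_nul() and feeds it to the same parser.
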
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
new file mode 100644 (file)
index 0000000..afb2e76
--- /dev/null
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extra Boot Config
+ * Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#define pr_fmt(fmt)    "bootconfig: " fmt
+
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/bootconfig.h>
+#include <linux/string.h>
+
+/*
+ * Extra Boot Config (XBC) is given as tree-structured ascii text of
+ * key-value pairs on memory.
+ * xbc_parse() parses the text to build a simple tree. Each tree node is
+ * simply a key word or a value. A key node may have a next key node or/and
+ * a child node (both key and value). A value node may have a next value
+ * node (for array).
+ */
+
+static struct xbc_node xbc_nodes[XBC_NODE_MAX] __initdata;
+static int xbc_node_num __initdata;
+static char *xbc_data __initdata;
+static size_t xbc_data_size __initdata;
+static struct xbc_node *last_parent __initdata;
+
+static int __init xbc_parse_error(const char *msg, const char *p)
+{
+       int pos = p - xbc_data;
+
+       pr_err("Parse error at pos %d: %s\n", pos, msg);
+       return -EINVAL;
+}
+
+/**
+ * xbc_root_node() - Get the root node of extended boot config
+ *
+ * Return the address of the root node of the extended boot config. If the
+ * extended boot config is not initialized, return NULL.
+ */
+struct xbc_node * __init xbc_root_node(void)
+{
+       if (unlikely(!xbc_data))
+               return NULL;
+
+       return xbc_nodes;
+}
+
+/**
+ * xbc_node_index() - Get the index of XBC node
+ * @node: A target node of getting index.
+ *
+ * Return the index number of @node in XBC node list.
+ */
+int __init xbc_node_index(struct xbc_node *node)
+{
+       return node - &xbc_nodes[0];
+}
+
+/**
+ * xbc_node_get_parent() - Get the parent XBC node
+ * @node: An XBC node.
+ *
+ * Return the parent node of @node. If the node is top node of the tree,
+ * return NULL.
+ */
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node)
+{
+       return node->parent == XBC_NODE_MAX ? NULL : &xbc_nodes[node->parent];
+}
+
+/**
+ * xbc_node_get_child() - Get the child XBC node
+ * @node: An XBC node.
+ *
+ * Return the first child node of @node. If the node has no child, return
+ * NULL.
+ */
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node)
+{
+       return node->child ? &xbc_nodes[node->child] : NULL;
+}
+
+/**
+ * xbc_node_get_next() - Get the next sibling XBC node
+ * @node: An XBC node.
+ *
+ * Return the NEXT sibling node of @node. If the node has no next sibling,
+ * return NULL. Note that even if this returns NULL, it doesn't mean @node
+ * has no siblings. (You also has to check whether the parent's child node
+ * is @node or not.)
+ */
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node)
+{
+       return node->next ? &xbc_nodes[node->next] : NULL;
+}
+
+/**
+ * xbc_node_get_data() - Get the data of XBC node
+ * @node: An XBC node.
+ *
+ * Return the data (which is always a null terminated string) of @node.
+ * If the node has invalid data, warn and return NULL.
+ */
+const char * __init xbc_node_get_data(struct xbc_node *node)
+{
+       int offset = node->data & ~XBC_VALUE;
+
+       if (WARN_ON(offset >= xbc_data_size))
+               return NULL;
+
+       return xbc_data + offset;
+}
+
+static bool __init
+xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
+{
+       const char *p = xbc_node_get_data(node);
+       int len = strlen(p);
+
+       if (strncmp(*prefix, p, len))
+               return false;
+
+       p = *prefix + len;
+       if (*p == '.')
+               p++;
+       else if (*p != '\0')
+               return false;
+       *prefix = p;
+
+       return true;
+}
+
+/**
+ * xbc_node_find_child() - Find a child node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ *
+ * Search a node under @parent which matches @key. The @key can contain
+ * several words joined with '.'. If @parent is NULL, this searches the
+ * whole tree. Return NULL if no node matches.
+ */
+struct xbc_node * __init
+xbc_node_find_child(struct xbc_node *parent, const char *key)
+{
+       struct xbc_node *node;
+
+       if (parent)
+               node = xbc_node_get_child(parent);
+       else
+               node = xbc_root_node();
+
+       while (node && xbc_node_is_key(node)) {
+               if (!xbc_node_match_prefix(node, &key))
+                       node = xbc_node_get_next(node);
+               else if (*key != '\0')
+                       node = xbc_node_get_child(node);
+               else
+                       break;
+       }
+
+       return node;
+}
+
+/**
+ * xbc_node_find_value() - Find a value node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ * @vnode: A container pointer of found XBC node.
+ *
+ * Search a value node under @parent whose (parent) key node matches @key,
+ * store it in *@vnode, and return the value string.
+ * The @key can contain several words joined with '.'. If @parent is NULL,
+ * this searches the whole tree. Return the value string if a matching key
+ * is found, or NULL if no node matches.
+ * Note that this returns a 0-length string and stores NULL in *@vnode if the
+ * key has no value, and that it returns the value of the first entry if
+ * the value is an array.
+ */
+const char * __init
+xbc_node_find_value(struct xbc_node *parent, const char *key,
+                   struct xbc_node **vnode)
+{
+       struct xbc_node *node = xbc_node_find_child(parent, key);
+
+       if (!node || !xbc_node_is_key(node))
+               return NULL;
+
+       node = xbc_node_get_child(node);
+       if (node && !xbc_node_is_value(node))
+               return NULL;
+
+       if (vnode)
+               *vnode = node;
+
+       return node ? xbc_node_get_data(node) : "";
+}
+
+/**
+ * xbc_node_compose_key_after() - Compose partial key string of the XBC node
+ * @root: Root XBC node
+ * @node: Target XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the partial key of @node into @buf, starting right after @root
+ * (@root itself is not included). If @root is NULL, this returns the full
+ * key words of @node.
+ * Returns the total length of the key stored in @buf. Returns -EINVAL
+ * if @node is NULL, @root is not an ancestor of @node, or @root is @node,
+ * or returns -ERANGE if the key depth is deeper than the maximum depth.
+ * This is expected to be used with xbc_find_node() to list all (child)
+ * keys under a given key.
+ */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+                                     struct xbc_node *node,
+                                     char *buf, size_t size)
+{
+       u16 keys[XBC_DEPTH_MAX];
+       int depth = 0, ret = 0, total = 0;
+
+       if (!node || node == root)
+               return -EINVAL;
+
+       if (xbc_node_is_value(node))
+               node = xbc_node_get_parent(node);
+
+       while (node && node != root) {
+               keys[depth++] = xbc_node_index(node);
+               if (depth == XBC_DEPTH_MAX)
+                       return -ERANGE;
+               node = xbc_node_get_parent(node);
+       }
+       if (!node && root)
+               return -EINVAL;
+
+       while (--depth >= 0) {
+               node = xbc_nodes + keys[depth];
+               ret = snprintf(buf, size, "%s%s", xbc_node_get_data(node),
+                              depth ? "." : "");
+               if (ret < 0)
+                       return ret;
+               if (ret > size) {
+                       size = 0;
+               } else {
+                       size -= ret;
+                       buf += ret;
+               }
+               total += ret;
+       }
+
+       return total;
+}
+
+/**
+ * xbc_node_find_next_leaf() - Find the next leaf node under given node
+ * @root: An XBC root node
+ * @node: An XBC node which starts from.
+ *
+ * Search the next leaf node (which means the terminal key node) of @node
+ * under @root node (including @root node itself).
+ * Return the next node, or NULL if no next leaf node is found.
+ */
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+                                                struct xbc_node *node)
+{
+       if (unlikely(!xbc_data))
+               return NULL;
+
+       if (!node) {    /* First try */
+               node = root;
+               if (!node)
+                       node = xbc_nodes;
+       } else {
+               if (node == root)       /* @root was a leaf, no child node. */
+                       return NULL;
+
+               while (!node->next) {
+                       node = xbc_node_get_parent(node);
+                       if (node == root)
+                               return NULL;
+                       /* User passed a node which is not under the parent */
+                       if (WARN_ON(!node))
+                               return NULL;
+               }
+               node = xbc_node_get_next(node);
+       }
+
+       while (node && !xbc_node_is_leaf(node))
+               node = xbc_node_get_child(node);
+
+       return node;
+}
+
+/**
+ * xbc_node_find_next_key_value() - Find the next key-value pair nodes
+ * @root: An XBC root node
+ * @leaf: A container pointer of XBC node which starts from.
+ *
+ * Search the next leaf node (which means the terminal key node) of *@leaf
+ * under @root node. Returns the value and updates *@leaf if a next leaf
+ * node is found, or NULL if no next leaf node is found.
+ * Note that this returns 0-length string if the key has no value, or
+ * the value of the first entry if the value is an array.
+ */
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+                                                struct xbc_node **leaf)
+{
+       /* tip must be passed */
+       if (WARN_ON(!leaf))
+               return NULL;
+
+       *leaf = xbc_node_find_next_leaf(root, *leaf);
+       if (!*leaf)
+               return NULL;
+       if ((*leaf)->child)
+               return xbc_node_get_data(xbc_node_get_child(*leaf));
+       else
+               return "";      /* No value key */
+}
+
+/* XBC parse and tree build */
+
+static struct xbc_node * __init xbc_add_node(char *data, u32 flag)
+{
+       struct xbc_node *node;
+       unsigned long offset;
+
+       if (xbc_node_num == XBC_NODE_MAX)
+               return NULL;
+
+       node = &xbc_nodes[xbc_node_num++];
+       offset = data - xbc_data;
+       node->data = (u16)offset;
+       if (WARN_ON(offset >= XBC_DATA_MAX))
+               return NULL;
+       node->data |= flag;
+       node->child = 0;
+       node->next = 0;
+
+       return node;
+}
+
+static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
+{
+       while (node->next)
+               node = xbc_node_get_next(node);
+
+       return node;
+}
+
+static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+{
+       struct xbc_node *sib, *node = xbc_add_node(data, flag);
+
+       if (node) {
+               if (!last_parent) {
+                       node->parent = XBC_NODE_MAX;
+                       sib = xbc_last_sibling(xbc_nodes);
+                       sib->next = xbc_node_index(node);
+               } else {
+                       node->parent = xbc_node_index(last_parent);
+                       if (!last_parent->child) {
+                               last_parent->child = xbc_node_index(node);
+                       } else {
+                               sib = xbc_node_get_child(last_parent);
+                               sib = xbc_last_sibling(sib);
+                               sib->next = xbc_node_index(node);
+                       }
+               }
+       } else
+               xbc_parse_error("Too many nodes", data);
+
+       return node;
+}
+
+static inline __init struct xbc_node *xbc_add_child(char *data, u32 flag)
+{
+       struct xbc_node *node = xbc_add_sibling(data, flag);
+
+       if (node)
+               last_parent = node;
+
+       return node;
+}
+
+static inline __init bool xbc_valid_keyword(char *key)
+{
+       if (key[0] == '\0')
+               return false;
+
+       while (isalnum(*key) || *key == '-' || *key == '_')
+               key++;
+
+       return *key == '\0';
+}
+
+static char *skip_comment(char *p)
+{
+       char *ret;
+
+       ret = strchr(p, '\n');
+       if (!ret)
+               ret = p + strlen(p);
+       else
+               ret++;
+
+       return ret;
+}
+
+static char *skip_spaces_until_newline(char *p)
+{
+       while (isspace(*p) && *p != '\n')
+               p++;
+       return p;
+}
+
+static int __init __xbc_open_brace(void)
+{
+       /* Mark the last key as open brace */
+       last_parent->next = XBC_NODE_MAX;
+
+       return 0;
+}
+
+static int __init __xbc_close_brace(char *p)
+{
+       struct xbc_node *node;
+
+       if (!last_parent || last_parent->next != XBC_NODE_MAX)
+               return xbc_parse_error("Unexpected closing brace", p);
+
+       node = last_parent;
+       node->next = 0;
+       do {
+               node = xbc_node_get_parent(node);
+       } while (node && node->next != XBC_NODE_MAX);
+       last_parent = node;
+
+       return 0;
+}
+
+/*
+ * Return the delimiter or an error; no node is added. As in lib/cmdline.c,
+ * you can use " around spaces, but you can't escape " inside a value.
+ */
+static int __init __xbc_parse_value(char **__v, char **__n)
+{
+       char *p, *v = *__v;
+       int c, quotes = 0;
+
+       v = skip_spaces(v);
+       while (*v == '#') {
+               v = skip_comment(v);
+               v = skip_spaces(v);
+       }
+       if (*v == '"' || *v == '\'') {
+               quotes = *v;
+               v++;
+       }
+       p = v - 1;
+       while ((c = *++p)) {
+               if (!isprint(c) && !isspace(c))
+                       return xbc_parse_error("Non printable value", p);
+               if (quotes) {
+                       if (c != quotes)
+                               continue;
+                       quotes = 0;
+                       *p++ = '\0';
+                       p = skip_spaces_until_newline(p);
+                       c = *p;
+                       if (c && !strchr(",;\n#}", c))
+                               return xbc_parse_error("No value delimiter", p);
+                       if (*p)
+                               p++;
+                       break;
+               }
+               if (strchr(",;\n#}", c)) {
+                       v = strim(v);
+                       *p++ = '\0';
+                       break;
+               }
+       }
+       if (quotes)
+               return xbc_parse_error("No closing quotes", p);
+       if (c == '#') {
+               p = skip_comment(p);
+               c = '\n';       /* A comment must be treated as a newline */
+       }
+       *__n = p;
+       *__v = v;
+
+       return c;
+}
+
+static int __init xbc_parse_array(char **__v)
+{
+       struct xbc_node *node;
+       char *next;
+       int c = 0;
+
+       do {
+               c = __xbc_parse_value(__v, &next);
+               if (c < 0)
+                       return c;
+
+               node = xbc_add_sibling(*__v, XBC_VALUE);
+               if (!node)
+                       return -ENOMEM;
+               *__v = next;
+       } while (c == ',');
+       node->next = 0;
+
+       return c;
+}
+
+static inline __init
+struct xbc_node *find_match_node(struct xbc_node *node, char *k)
+{
+       while (node) {
+               if (!strcmp(xbc_node_get_data(node), k))
+                       break;
+               node = xbc_node_get_next(node);
+       }
+       return node;
+}
+
+static int __init __xbc_add_key(char *k)
+{
+       struct xbc_node *node;
+
+       if (!xbc_valid_keyword(k))
+               return xbc_parse_error("Invalid keyword", k);
+
+       if (unlikely(xbc_node_num == 0))
+               goto add_node;
+
+       if (!last_parent)       /* the first level */
+               node = find_match_node(xbc_nodes, k);
+       else
+               node = find_match_node(xbc_node_get_child(last_parent), k);
+
+       if (node)
+               last_parent = node;
+       else {
+add_node:
+               node = xbc_add_child(k, XBC_KEY);
+               if (!node)
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
+static int __init __xbc_parse_keys(char *k)
+{
+       char *p;
+       int ret;
+
+       k = strim(k);
+       while ((p = strchr(k, '.'))) {
+               *p++ = '\0';
+               ret = __xbc_add_key(k);
+               if (ret)
+                       return ret;
+               k = p;
+       }
+
+       return __xbc_add_key(k);
+}
+
+static int __init xbc_parse_kv(char **k, char *v)
+{
+       struct xbc_node *prev_parent = last_parent;
+       struct xbc_node *node;
+       char *next;
+       int c, ret;
+
+       ret = __xbc_parse_keys(*k);
+       if (ret)
+               return ret;
+
+       c = __xbc_parse_value(&v, &next);
+       if (c < 0)
+               return c;
+
+       node = xbc_add_sibling(v, XBC_VALUE);
+       if (!node)
+               return -ENOMEM;
+
+       if (c == ',') { /* Array */
+               c = xbc_parse_array(&next);
+               if (c < 0)
+                       return c;
+       }
+
+       last_parent = prev_parent;
+
+       if (c == '}') {
+               ret = __xbc_close_brace(next - 1);
+               if (ret < 0)
+                       return ret;
+       }
+
+       *k = next;
+
+       return 0;
+}
+
+static int __init xbc_parse_key(char **k, char *n)
+{
+       struct xbc_node *prev_parent = last_parent;
+       int ret;
+
+       *k = strim(*k);
+       if (**k != '\0') {
+               ret = __xbc_parse_keys(*k);
+               if (ret)
+                       return ret;
+               last_parent = prev_parent;
+       }
+       *k = n;
+
+       return 0;
+}
+
+static int __init xbc_open_brace(char **k, char *n)
+{
+       int ret;
+
+       ret = __xbc_parse_keys(*k);
+       if (ret)
+               return ret;
+       *k = n;
+
+       return __xbc_open_brace();
+}
+
+static int __init xbc_close_brace(char **k, char *n)
+{
+       int ret;
+
+       ret = xbc_parse_key(k, n);
+       if (ret)
+               return ret;
+       /* k is updated in xbc_parse_key() */
+
+       return __xbc_close_brace(n - 1);
+}
+
+static int __init xbc_verify_tree(void)
+{
+       int i, depth, len, wlen;
+       struct xbc_node *n, *m;
+
+       /* Empty tree */
+       if (xbc_node_num == 0) {
+               xbc_parse_error("Empty config", xbc_data);
+               return -ENOENT;
+       }
+
+       for (i = 0; i < xbc_node_num; i++) {
+               if (xbc_nodes[i].next > xbc_node_num) {
+                       return xbc_parse_error("No closing brace",
+                               xbc_node_get_data(xbc_nodes + i));
+               }
+       }
+
+       /* Key tree limitation check */
+       n = &xbc_nodes[0];
+       depth = 1;
+       len = 0;
+
+       while (n) {
+               wlen = strlen(xbc_node_get_data(n)) + 1;
+               len += wlen;
+               if (len > XBC_KEYLEN_MAX)
+                       return xbc_parse_error("Too long key length",
+                               xbc_node_get_data(n));
+
+               m = xbc_node_get_child(n);
+               if (m && xbc_node_is_key(m)) {
+                       n = m;
+                       depth++;
+                       if (depth > XBC_DEPTH_MAX)
+                               return xbc_parse_error("Too many key words",
+                                               xbc_node_get_data(n));
+                       continue;
+               }
+               len -= wlen;
+               m = xbc_node_get_next(n);
+               while (!m) {
+                       n = xbc_node_get_parent(n);
+                       if (!n)
+                               break;
+                       len -= strlen(xbc_node_get_data(n)) + 1;
+                       depth--;
+                       m = xbc_node_get_next(n);
+               }
+               n = m;
+       }
+
+       return 0;
+}
+
+/**
+ * xbc_destroy_all() - Clean up all parsed bootconfig
+ *
+ * This clears all data structures of parsed bootconfig on memory.
+ * If you need to reuse xbc_init() with new boot config, you can
+ * use this.
+ */
+void __init xbc_destroy_all(void)
+{
+       xbc_data = NULL;
+       xbc_data_size = 0;
+       xbc_node_num = 0;
+       memset(xbc_nodes, 0, sizeof(xbc_nodes));
+}
+
+/**
+ * xbc_init() - Parse given XBC file and build XBC internal tree
+ * @buf: boot config text
+ *
+ * This parses the boot config text in @buf. @buf must be a
+ * null terminated string and smaller than XBC_DATA_MAX.
+ * Return the number of stored nodes (>0) if succeeded, or -errno
+ * if there is any error.
+ */
+int __init xbc_init(char *buf)
+{
+       char *p, *q;
+       int ret, c;
+
+       if (xbc_data) {
+               pr_err("Error: bootconfig is already initialized.\n");
+               return -EBUSY;
+       }
+
+       ret = strlen(buf);
+       if (ret > XBC_DATA_MAX - 1 || ret == 0) {
+               pr_err("Error: Config data is %s.\n",
+                       ret ? "too big" : "empty");
+               return -ERANGE;
+       }
+
+       xbc_data = buf;
+       xbc_data_size = ret + 1;
+       last_parent = NULL;
+
+       p = buf;
+       do {
+               q = strpbrk(p, "{}=;\n#");
+               if (!q) {
+                       p = skip_spaces(p);
+                       if (*p != '\0')
+                               ret = xbc_parse_error("No delimiter", p);
+                       break;
+               }
+
+               c = *q;
+               *q++ = '\0';
+               switch (c) {
+               case '=':
+                       ret = xbc_parse_kv(&p, q);
+                       break;
+               case '{':
+                       ret = xbc_open_brace(&p, q);
+                       break;
+               case '#':
+                       q = skip_comment(q);
+                       /* fall through */
+               case ';':
+               case '\n':
+                       ret = xbc_parse_key(&p, q);
+                       break;
+               case '}':
+                       ret = xbc_close_brace(&p, q);
+                       break;
+               }
+       } while (!ret);
+
+       if (!ret)
+               ret = xbc_verify_tree();
+
+       if (ret < 0)
+               xbc_destroy_all();
+       else
+               ret = xbc_node_num;
+
+       return ret;
+}
+
+/**
+ * xbc_debug_dump() - Dump current XBC node list
+ *
+ * Dump the current XBC node list on printk buffer for debug.
+ */
+void __init xbc_debug_dump(void)
+{
+       int i;
+
+       for (i = 0; i < xbc_node_num; i++) {
+               pr_debug("[%d] %s (%s) .next=%d, .child=%d .parent=%d\n", i,
+                       xbc_node_get_data(xbc_nodes + i),
+                       xbc_node_is_value(xbc_nodes + i) ? "value" : "key",
+                       xbc_nodes[i].next, xbc_nodes[i].child,
+                       xbc_nodes[i].parent);
+       }
+}
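
For orientation, a hedged sketch of how this API might be driven from early init code. The key names and the initcall are made up for illustration, and the config buffer must outlive the tree, since the nodes store offsets into it:

	static char example_xbc[] __initdata =
		"ftrace.event.task.task_fork {\n"
		"	filter = \"pid < 128\"\n"
		"}\n";

	static int __init xbc_example_init(void)
	{
		const char *val;

		if (xbc_init(example_xbc) < 0)	/* returns the node count on success */
			return -EINVAL;

		/* "" if the key exists without a value, NULL if it is missing */
		val = xbc_node_find_value(NULL,
				"ftrace.event.task.task_fork.filter", NULL);
		pr_info("filter = %s\n", val ?: "(none)");

		xbc_destroy_all();
		return 0;
	}
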
index 08ec58c..f607b96 100644 (file)
@@ -434,6 +434,23 @@ char *strchrnul(const char *s, int c)
 EXPORT_SYMBOL(strchrnul);
 #endif
 
+/**
+ * strnchrnul - Find and return a character in a length limited string,
+ * or end of string
+ * @s: The string to be searched
+ * @count: The number of characters to be searched
+ * @c: The character to search for
+ *
+ * Returns a pointer to the first occurrence of 'c' within the first @count
+ * characters of @s. If 'c' is not found, returns a pointer either to the
+ * NUL terminator or to the first character past the searched range.
+ */
+char *strnchrnul(const char *s, size_t count, int c)
+{
+       while (count-- && *s && *s != (char)c)
+               s++;
+       return (char *)s;
+}
+
 #ifndef __HAVE_ARCH_STRRCHR
 /**
  * strrchr - Find the last occurrence of a character in a string
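
A small illustration of the bounded search that bitmap_parse() above relies on; the buffer contents are made up:

	const char buf[] = "dead,beef\nrest";

	strnchrnul(buf, sizeof(buf) - 1, '\n');	/* -> &buf[9], the '\n' */
	strnchrnul(buf, 4, '\n');		/* not in first 4 chars -> &buf[4] (',') */
	strnchrnul("dead", UINT_MAX, '\n');	/* not found -> pointer to the NUL */
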
index 71ec3af..61ed71c 100644 (file)
@@ -295,7 +295,8 @@ static void __init test_replace(void)
        expect_eq_bitmap(bmap, exp3_1_0, nbits);
 }
 
-#define PARSE_TIME 0x1
+#define PARSE_TIME     0x1
+#define NO_LEN         0x2
 
 struct test_bitmap_parselist{
        const int errno;
@@ -349,7 +350,6 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
        {-EINVAL, "0-31:a/1", NULL, 8, 0},
        {-EINVAL, "0-\n", NULL, 8, 0},
 
-#undef step
 };
 
 static void __init __test_bitmap_parselist(int is_user)
@@ -401,6 +401,95 @@ static void __init __test_bitmap_parselist(int is_user)
        }
 }
 
+static const unsigned long parse_test[] __initconst = {
+       BITMAP_FROM_U64(0),
+       BITMAP_FROM_U64(1),
+       BITMAP_FROM_U64(0xdeadbeef),
+       BITMAP_FROM_U64(0x100000000ULL),
+};
+
+static const unsigned long parse_test2[] __initconst = {
+       BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xdeadbeef),
+       BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xbaadf00ddeadbeef),
+       BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0x0badf00ddeadbeef),
+};
+
+static const struct test_bitmap_parselist parse_tests[] __initconst = {
+       {0, "",                         &parse_test[0 * step], 32, 0},
+       {0, " ",                        &parse_test[0 * step], 32, 0},
+       {0, "0",                        &parse_test[0 * step], 32, 0},
+       {0, "0\n",                      &parse_test[0 * step], 32, 0},
+       {0, "1",                        &parse_test[1 * step], 32, 0},
+       {0, "deadbeef",                 &parse_test[2 * step], 32, 0},
+       {0, "1,0",                      &parse_test[3 * step], 33, 0},
+       {0, "deadbeef,\n,0,1",          &parse_test[2 * step], 96, 0},
+
+       {0, "deadbeef,1,0",             &parse_test2[0 * 2 * step], 96, 0},
+       {0, "baadf00d,deadbeef,1,0",    &parse_test2[1 * 2 * step], 128, 0},
+       {0, "badf00d,deadbeef,1,0",     &parse_test2[2 * 2 * step], 124, 0},
+       {0, "badf00d,deadbeef,1,0",     &parse_test2[2 * 2 * step], 124, NO_LEN},
+       {0, "  badf00d,deadbeef,1,0  ", &parse_test2[2 * 2 * step], 124, 0},
+       {0, " , badf00d,deadbeef,1,0 , ",       &parse_test2[2 * 2 * step], 124, 0},
+       {0, " , badf00d, ,, ,,deadbeef,1,0 , ", &parse_test2[2 * 2 * step], 124, 0},
+
+       {-EINVAL,    "goodfood,deadbeef,1,0",   NULL, 128, 0},
+       {-EOVERFLOW, "3,0",                     NULL, 33, 0},
+       {-EOVERFLOW, "123badf00d,deadbeef,1,0", NULL, 128, 0},
+       {-EOVERFLOW, "badf00d,deadbeef,1,0",    NULL, 90, 0},
+       {-EOVERFLOW, "fbadf00d,deadbeef,1,0",   NULL, 95, 0},
+       {-EOVERFLOW, "badf00d,deadbeef,1,0",    NULL, 100, 0},
+#undef step
+};
+
+static void __init __test_bitmap_parse(int is_user)
+{
+       int i;
+       int err;
+       ktime_t time;
+       DECLARE_BITMAP(bmap, 2048);
+       char *mode = is_user ? "_user"  : "";
+
+       for (i = 0; i < ARRAY_SIZE(parse_tests); i++) {
+               struct test_bitmap_parselist test = parse_tests[i];
+
+               if (is_user) {
+                       size_t len = strlen(test.in);
+                       mm_segment_t orig_fs = get_fs();
+
+                       set_fs(KERNEL_DS);
+                       time = ktime_get();
+                       err = bitmap_parse_user((__force const char __user *)test.in, len,
+                                               bmap, test.nbits);
+                       time = ktime_get() - time;
+                       set_fs(orig_fs);
+               } else {
+                       size_t len = test.flags & NO_LEN ?
+                               UINT_MAX : strlen(test.in);
+                       time = ktime_get();
+                       err = bitmap_parse(test.in, len, bmap, test.nbits);
+                       time = ktime_get() - time;
+               }
+
+               if (err != test.errno) {
+                       pr_err("parse%s: %d: input is %s, errno is %d, expected %d\n",
+                                       mode, i, test.in, err, test.errno);
+                       continue;
+               }
+
+               if (!err && test.expected
+                        && !__bitmap_equal(bmap, test.expected, test.nbits)) {
+                       pr_err("parse%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+                                       mode, i, test.in, bmap[0],
+                                       *test.expected);
+                       continue;
+               }
+
+               if (test.flags & PARSE_TIME)
+                       pr_err("parse%s: %d: input is '%s' OK, Time: %llu\n",
+                                       mode, i, test.in, time);
+       }
+}
+
 static void __init test_bitmap_parselist(void)
 {
        __test_bitmap_parselist(0);
@@ -411,6 +500,16 @@ static void __init test_bitmap_parselist_user(void)
        __test_bitmap_parselist(1);
 }
 
+static void __init test_bitmap_parse(void)
+{
+       __test_bitmap_parse(0);
+}
+
+static void __init test_bitmap_parse_user(void)
+{
+       __test_bitmap_parse(1);
+}
+
 #define EXP1_IN_BITS   (sizeof(exp1) * 8)
 
 static void __init test_bitmap_arr32(void)
@@ -516,6 +615,8 @@ static void __init selftest(void)
        test_copy();
        test_replace();
        test_bitmap_arr32();
+       test_bitmap_parse();
+       test_bitmap_parse_user();
        test_bitmap_parselist();
        test_bitmap_parselist_user();
        test_mem_optimisations();
index 327b3eb..0271b22 100644 (file)
@@ -117,3 +117,24 @@ config DEBUG_RODATA_TEST
     depends on STRICT_KERNEL_RWX
     ---help---
       This option enables a testcase for the setting rodata read-only.
+
+config GENERIC_PTDUMP
+       bool
+
+config PTDUMP_CORE
+       bool
+
+config PTDUMP_DEBUGFS
+       bool "Export kernel pagetable layout to userspace via debugfs"
+       depends on DEBUG_KERNEL
+       depends on DEBUG_FS
+       depends on GENERIC_PTDUMP
+       select PTDUMP_CORE
+       help
+         Say Y here if you want to show the kernel pagetable layout in a
+         debugfs file. This information is only useful for kernel developers
+         who are working in architecture specific areas of the kernel.
+         It is probably not a good idea to enable this feature in a production
+         kernel.
+
+         If in doubt, say N.
index 32f08e2..272e660 100644 (file)
@@ -109,3 +109,4 @@ obj-$(CONFIG_ZONE_DEVICE) += memremap.o
 obj-$(CONFIG_HMM_MIRROR) += hmm.o
 obj-$(CONFIG_MEMFD_CREATE) += memfd.o
 obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
+obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
index e13f4d2..1b521e0 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1792,7 +1792,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * Before activating this code, please be aware that the following assumptions
  * are currently made:
  *
- *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
+ *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
  *  free pages containing page tables or TLB flushing requires IPI broadcast.
  *
  *  *) ptes can be read atomically by the architecture.
index d379cb6..72e5a6d 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -186,7 +186,7 @@ static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 }
 
 static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
-                            struct mm_walk *walk)
+                            __always_unused int depth, struct mm_walk *walk)
 {
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
@@ -380,7 +380,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
-               return hmm_vma_walk_hole(start, end, walk);
+               return hmm_vma_walk_hole(start, end, -1, walk);
 
        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                bool fault, write_fault;
@@ -474,23 +474,32 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 {
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
-       unsigned long addr = start, next;
-       pmd_t *pmdp;
+       unsigned long addr = start;
        pud_t pud;
-       int ret;
+       int ret = 0;
+       spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
+
+       if (!ptl)
+               return 0;
+
+       /* Normally we don't want to split the huge page */
+       walk->action = ACTION_CONTINUE;
 
-again:
        pud = READ_ONCE(*pudp);
-       if (pud_none(pud))
-               return hmm_vma_walk_hole(start, end, walk);
+       if (pud_none(pud)) {
+               ret = hmm_vma_walk_hole(start, end, -1, walk);
+               goto out_unlock;
+       }
 
        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                uint64_t *pfns, cpu_flags;
                bool fault, write_fault;
 
-               if (!pud_present(pud))
-                       return hmm_vma_walk_hole(start, end, walk);
+               if (!pud_present(pud)) {
+                       ret = hmm_vma_walk_hole(start, end, -1, walk);
+                       goto out_unlock;
+               }
 
                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
@@ -499,16 +508,20 @@ again:
                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     cpu_flags, &fault, &write_fault);
-               if (fault || write_fault)
-                       return hmm_vma_walk_hole_(addr, end, fault,
-                                               write_fault, walk);
+               if (fault || write_fault) {
+                       ret = hmm_vma_walk_hole_(addr, end, fault,
+                                                write_fault, walk);
+                       goto out_unlock;
+               }
 
                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
-                       if (unlikely(!hmm_vma_walk->pgmap))
-                               return -EBUSY;
+                       if (unlikely(!hmm_vma_walk->pgmap)) {
+                               ret = -EBUSY;
+                               goto out_unlock;
+                       }
                        pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                  cpu_flags;
                }
@@ -517,22 +530,15 @@ again:
                        hmm_vma_walk->pgmap = NULL;
                }
                hmm_vma_walk->last = end;
-               return 0;
+               goto out_unlock;
        }
 
-       split_huge_pud(walk->vma, pudp, addr);
-       if (pud_none(*pudp))
-               goto again;
+       /* Ask for the PUD to be split */
+       walk->action = ACTION_SUBTREE;
 
-       pmdp = pmd_offset(pudp, addr);
-       do {
-               next = pmd_addr_end(addr, end);
-               ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
-               if (ret)
-                       return ret;
-       } while (pmdp++, addr = next, addr != end);
-
-       return 0;
+out_unlock:
+       spin_unlock(ptl);
+       return ret;
 }
 #else
 #define hmm_vma_walk_pud       NULL
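The hmm_vma_walk_pud() rewrite above leans on the new walk->action protocol instead of open-coding the PMD loop. As a hedged, minimal sketch (not from this series) of how a pud_entry() callback drives that protocol; the callback name and the decision logic are illustrative assumptions:

static int example_pud_entry(pud_t *pudp, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	/* Take the huge-PUD lock ourselves; the core walker no longer does. */
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;	/* not huge: default ACTION_SUBTREE descends */

	/* Handle the leaf PUD here and skip the subtree instead of splitting. */
	walk->action = ACTION_CONTINUE;
	spin_unlock(ptl);
	return 0;
}

Leaving ACTION_SUBTREE in place (the default set by walk_pud_range()) asks the core to split the huge entry and descend, while ACTION_AGAIN re-evaluates the same entry after the callback returns.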
index 1c4be87..0bccc62 100644 (file)
@@ -1664,6 +1664,9 @@ out_unlock:
  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
  * impractical.
  *
+ * See vmf_insert_mixed_prot() for a discussion of the implication of using
+ * a value of @pgprot different from that of @vma->vm_page_prot.
+ *
  * Context: Process context.  May allocate using %GFP_KERNEL.
  * Return: vm_fault_t value.
  */
@@ -1737,9 +1740,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-               unsigned long addr, pfn_t pfn, bool mkwrite)
+               unsigned long addr, pfn_t pfn, pgprot_t pgprot,
+               bool mkwrite)
 {
-       pgprot_t pgprot = vma->vm_page_prot;
        int err;
 
        BUG_ON(!vm_mixed_ok(vma, pfn));
@@ -1782,10 +1785,43 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
        return VM_FAULT_NOPAGE;
 }
 
+/**
+ * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vmf_insert_mixed(), except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * Typically this function should be used by drivers to set caching- and
+ * encryption bits different than those of @vma->vm_page_prot, because
+ * the caching- or encryption mode may not be known at mmap() time.
+ * This is ok as long as @vma->vm_page_prot is not used by the core vm
+ * to set caching and encryption bits for those vmas (except for COW pages).
+ * This is ensured by core vm only modifying these page table entries using
+ * functions that don't touch caching- or encryption bits, using pte_modify()
+ * if needed. (See for example mprotect()).
+ * Also when new page-table entries are created, this is only done using the
+ * fault() callback, and never using the value of vma->vm_page_prot,
+ * except for page-table entries that point to anonymous pages as the result
+ * of COW.
+ *
+ * Context: Process context.  May allocate using %GFP_KERNEL.
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+                                pfn_t pfn, pgprot_t pgprot)
+{
+       return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
+}
+EXPORT_SYMBOL(vmf_insert_mixed_prot);
+
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                pfn_t pfn)
 {
-       return __vm_insert_mixed(vma, addr, pfn, false);
+       return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
 }
 EXPORT_SYMBOL(vmf_insert_mixed);
 
@@ -1797,7 +1833,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
                unsigned long addr, pfn_t pfn)
 {
-       return __vm_insert_mixed(vma, addr, pfn, true);
+       return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
 }
 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
 
index 36d8091..0a54ffa 100644 (file)
@@ -355,7 +355,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                if (unlikely(pfn_to_nid(start_pfn) != nid))
                        continue;
 
-               if (zone && zone != page_zone(pfn_to_page(start_pfn)))
+               if (zone != page_zone(pfn_to_page(start_pfn)))
                        continue;
 
                return start_pfn;
@@ -380,7 +380,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
                if (unlikely(pfn_to_nid(pfn) != nid))
                        continue;
 
-               if (zone && zone != page_zone(pfn_to_page(pfn)))
+               if (zone != page_zone(pfn_to_page(pfn)))
                        continue;
 
                return pfn;
@@ -392,14 +392,11 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
 {
-       unsigned long zone_start_pfn = zone->zone_start_pfn;
-       unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
-       unsigned long zone_end_pfn = z;
        unsigned long pfn;
        int nid = zone_to_nid(zone);
 
        zone_span_writelock(zone);
-       if (zone_start_pfn == start_pfn) {
+       if (zone->zone_start_pfn == start_pfn) {
                /*
                 * If the section is smallest section in the zone, it need
                 * shrink zone->zone_start_pfn and zone->zone_spanned_pages.
@@ -407,50 +404,30 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                 * for shrinking zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
-                                               zone_end_pfn);
+                                               zone_end_pfn(zone));
                if (pfn) {
+                       zone->spanned_pages = zone_end_pfn(zone) - pfn;
                        zone->zone_start_pfn = pfn;
-                       zone->spanned_pages = zone_end_pfn - pfn;
+               } else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
                }
-       } else if (zone_end_pfn == end_pfn) {
+       } else if (zone_end_pfn(zone) == end_pfn) {
                /*
                 * If the section is biggest section in the zone, it need
                 * shrink zone->spanned_pages.
                 * In this case, we find second biggest valid mem_section for
                 * shrinking zone.
                 */
-               pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
+               pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
                                               start_pfn);
                if (pfn)
-                       zone->spanned_pages = pfn - zone_start_pfn + 1;
-       }
-
-       /*
-        * The section is not biggest or smallest mem_section in the zone, it
-        * only creates a hole in the zone. So in this case, we need not
-        * change the zone. But perhaps, the zone has only hole data. Thus
-        * it check the zone has only hole or not.
-        */
-       pfn = zone_start_pfn;
-       for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-               if (unlikely(!pfn_to_online_page(pfn)))
-                       continue;
-
-               if (page_zone(pfn_to_page(pfn)) != zone)
-                       continue;
-
-               /* Skip range to be removed */
-               if (pfn >= start_pfn && pfn < end_pfn)
-                       continue;
-
-               /* If we find valid section, we have nothing to do */
-               zone_span_writeunlock(zone);
-               return;
+                       zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
+               else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
+               }
        }
-
-       /* The zone has no valid section */
-       zone->zone_start_pfn = 0;
-       zone->spanned_pages = 0;
        zone_span_writeunlock(zone);
 }
 
@@ -490,6 +467,9 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long flags;
 
+       /* Poison struct pages because they are now uninitialized again. */
+       page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+
 #ifdef CONFIG_ZONE_DEVICE
        /*
         * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
@@ -536,25 +516,20 @@ static void __remove_section(unsigned long pfn, unsigned long nr_pages,
 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
                    struct vmem_altmap *altmap)
 {
+       const unsigned long end_pfn = pfn + nr_pages;
+       unsigned long cur_nr_pages;
        unsigned long map_offset = 0;
-       unsigned long nr, start_sec, end_sec;
 
        map_offset = vmem_altmap_offset(altmap);
 
        if (check_pfn_span(pfn, nr_pages, "remove"))
                return;
 
-       start_sec = pfn_to_section_nr(pfn);
-       end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-       for (nr = start_sec; nr <= end_sec; nr++) {
-               unsigned long pfns;
-
+       for (; pfn < end_pfn; pfn += cur_nr_pages) {
                cond_resched();
-               pfns = min(nr_pages, PAGES_PER_SECTION
-                               - (pfn & ~PAGE_SECTION_MASK));
-               __remove_section(pfn, pfns, map_offset, altmap);
-               pfn += pfns;
-               nr_pages -= pfns;
+               /* Select all remaining pages up to the next section boundary */
+               cur_nr_pages = min(end_pfn - pfn, -(pfn | PAGE_SECTION_MASK));
+               __remove_section(pfn, cur_nr_pages, map_offset, altmap);
                map_offset = 0;
        }
 }
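The -(pfn | PAGE_SECTION_MASK) term in the rewritten loop above uses unsigned wrap-around to compute the distance to the next section boundary. A small userspace sketch that checks the identity, with PAGES_PER_SECTION assumed to be 0x8000 purely for illustration:

#include <assert.h>
#include <stdio.h>

/* Illustrative values; the kernel derives these from SECTION_SIZE_BITS. */
#define PAGES_PER_SECTION	0x8000UL
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

int main(void)
{
	unsigned long pfn = 0x12345;
	/* Unsigned negation wraps around to the pages left in this section. */
	unsigned long cur_nr_pages = -(pfn | PAGE_SECTION_MASK);

	assert(cur_nr_pages == PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));
	printf("%lu pages to the next section boundary\n", cur_nr_pages); /* 23739 */
	return 0;
}

For a section-aligned pfn the same expression yields a full PAGES_PER_SECTION, so min(end_pfn - pfn, ...) never steps past a section boundary.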
@@ -1197,14 +1172,13 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) belong to the same zone.
- * When true, return its valid [start, end).
+ * Confirm all pages in a range [start, end) belong to the same zone (skipping
+ * memory holes). If so, return the zone; otherwise return NULL.
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
-                        unsigned long *valid_start, unsigned long *valid_end)
+struct zone *test_pages_in_a_zone(unsigned long start_pfn,
+                                 unsigned long end_pfn)
 {
        unsigned long pfn, sec_end_pfn;
-       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
@@ -1225,24 +1199,15 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                                continue;
                        /* Check if we got outside of the zone */
                        if (zone && !zone_spans_pfn(zone, pfn + i))
-                               return 0;
+                               return NULL;
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
-                               return 0;
-                       if (!zone)
-                               start = pfn + i;
+                               return NULL;
                        zone = page_zone(page);
-                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
 
-       if (zone) {
-               *valid_start = start;
-               *valid_end = min(end, end_pfn);
-               return 1;
-       } else {
-               return 0;
-       }
+       return zone;
 }
 
 /*
@@ -1487,7 +1452,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
        unsigned long offlined_pages = 0;
        int ret, node, nr_isolate_pageblock;
        unsigned long flags;
-       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
        char *reason;
@@ -1512,14 +1476,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
-                                 &valid_end)) {
+       zone = test_pages_in_a_zone(start_pfn, end_pfn);
+       if (!zone) {
                ret = -EINVAL;
                reason = "multizone range";
                goto failed_removal;
        }
-
-       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
 
        /* set above range as isolated */
index 4c723d2..09b5b7a 100644 (file)
@@ -120,6 +120,8 @@ void memunmap_pages(struct dev_pagemap *pgmap)
        nid = page_to_nid(first_page);
 
        mem_hotplug_begin();
+       remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
+                                  PHYS_PFN(resource_size(res)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(res->start),
                               PHYS_PFN(resource_size(res)), NULL);
index edf42ed..b109287 100644 (file)
@@ -2151,6 +2151,7 @@ out_unlock:
 #ifdef CONFIG_DEVICE_PRIVATE
 static int migrate_vma_collect_hole(unsigned long start,
                                    unsigned long end,
+                                   __always_unused int depth,
                                    struct mm_walk *walk)
 {
        struct migrate_vma *migrate = walk->private;
@@ -2195,7 +2196,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 
 again:
        if (pmd_none(*pmdp))
-               return migrate_vma_collect_hole(start, end, walk);
+               return migrate_vma_collect_hole(start, end, -1, walk);
 
        if (pmd_trans_huge(*pmdp)) {
                struct page *page;
@@ -2228,7 +2229,7 @@ again:
                                return migrate_vma_collect_skip(start, end,
                                                                walk);
                        if (pmd_none(*pmdp))
-                               return migrate_vma_collect_hole(start, end,
+                               return migrate_vma_collect_hole(start, end, -1,
                                                                walk);
                }
        }
index 49b6fa2..0e6dd99 100644 (file)
@@ -112,6 +112,7 @@ static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
 }
 
 static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+                                  __always_unused int depth,
                                   struct mm_walk *walk)
 {
        walk->private += __mincore_unmapped_range(addr, end,
index 7d70e5c..a3538cb 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
@@ -69,7 +69,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
        VM_BUG_ON(!tlb->end);
 
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        VM_WARN_ON(tlb->page_size != page_size);
 #endif
 
@@ -89,58 +89,108 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
        return false;
 }
 
-#endif /* HAVE_MMU_GATHER_NO_GATHER */
+#endif /* MMU_GATHER_NO_GATHER */
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
 
-/*
- * See the comment near struct mmu_table_batch.
- */
+static void __tlb_remove_table_free(struct mmu_table_batch *batch)
+{
+       int i;
+
+       for (i = 0; i < batch->nr; i++)
+               __tlb_remove_table(batch->tables[i]);
+
+       free_page((unsigned long)batch);
+}
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
 
 /*
- * If we want tlb_remove_table() to imply TLB invalidates.
+ * Semi RCU freeing of the page directories.
+ *
+ * This is needed by some architectures to implement software pagetable walkers.
+ *
+ * gup_fast() and other software pagetable walkers do a lockless page-table
+ * walk and therefore need some synchronization with the freeing of the page
+ * directories. The chosen means to accomplish that is by disabling IRQs over
+ * the walk.
+ *
+ * Architectures that use IPIs to flush TLBs will then automagically DTRT,
+ * since we unlink the page, flush TLBs, free the page. Since the disabling of
+ * IRQs delays the completion of the TLB flush we can never observe an already
+ * freed page.
+ *
+ * Architectures that do not have this (PPC) need to delay the freeing by some
+ * other means; this is that means.
+ *
+ * What we do is batch the freed directory pages (tables) and RCU free them.
+ * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
+ * holds off grace periods.
+ *
+ * However, in order to batch these pages we need to allocate storage; this
+ * allocation is deep inside the MM code and can thus easily fail on memory
+ * pressure. To guarantee progress we fall back to single table freeing, see
+ * the implementation of tlb_remove_table_one().
+ *
  */
-static inline void tlb_table_invalidate(struct mmu_gather *tlb)
-{
-#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
-       /*
-        * Invalidate page-table caches used by hardware walkers. Then we still
-        * need to RCU-sched wait while freeing the pages because software
-        * walkers can still be in-flight.
-        */
-       tlb_flush_mmu_tlbonly(tlb);
-#endif
-}
 
 static void tlb_remove_table_smp_sync(void *arg)
 {
        /* Simply deliver the interrupt */
 }
 
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_sync_one(void)
 {
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely on
-        * IRQ disabling. See the comment near struct mmu_table_batch.
+        * IRQ disabling.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-       __tlb_remove_table(table);
 }
 
 static void tlb_remove_table_rcu(struct rcu_head *head)
 {
-       struct mmu_table_batch *batch;
-       int i;
+       __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
+}
+
+static void tlb_remove_table_free(struct mmu_table_batch *batch)
+{
+       call_rcu(&batch->rcu, tlb_remove_table_rcu);
+}
 
-       batch = container_of(head, struct mmu_table_batch, rcu);
+#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
-       for (i = 0; i < batch->nr; i++)
-               __tlb_remove_table(batch->tables[i]);
+static void tlb_remove_table_sync_one(void) { }
 
-       free_page((unsigned long)batch);
+static void tlb_remove_table_free(struct mmu_table_batch *batch)
+{
+       __tlb_remove_table_free(batch);
+}
+
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+/*
+ * If we want tlb_remove_table() to imply TLB invalidates.
+ */
+static inline void tlb_table_invalidate(struct mmu_gather *tlb)
+{
+       if (tlb_needs_table_invalidate()) {
+               /*
+                * Invalidate page-table caches used by hardware walkers. Then
+                * we still need to RCU-sched wait while freeing the pages
+                * because software walkers can still be in-flight.
+                */
+               tlb_flush_mmu_tlbonly(tlb);
+       }
+}
+
+static void tlb_remove_table_one(void *table)
+{
+       tlb_remove_table_sync_one();
+       __tlb_remove_table(table);
 }
 
 static void tlb_table_flush(struct mmu_gather *tlb)
@@ -149,7 +199,7 @@ static void tlb_table_flush(struct mmu_gather *tlb)
 
        if (*batch) {
                tlb_table_invalidate(tlb);
-               call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
+               tlb_remove_table_free(*batch);
                *batch = NULL;
        }
 }
@@ -173,14 +223,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
                tlb_table_flush(tlb);
 }
 
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+static inline void tlb_table_init(struct mmu_gather *tlb)
+{
+       tlb->batch = NULL;
+}
+
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+static inline void tlb_table_flush(struct mmu_gather *tlb) { }
+static inline void tlb_table_init(struct mmu_gather *tlb) { }
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
 
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
-#endif
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
        tlb_batch_pages_flush(tlb);
 #endif
 }
@@ -211,7 +269,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
        /* Is it from 0 to ~0? */
        tlb->fullmm     = !(start | (end+1));
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
@@ -220,10 +278,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
        tlb->batch_count = 0;
 #endif
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-       tlb->batch = NULL;
-#endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+       tlb_table_init(tlb);
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        tlb->page_size = 0;
 #endif
 
@@ -271,7 +327,7 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 
        tlb_flush_mmu(tlb);
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
        tlb_batch_list_free(tlb);
 #endif
        dec_tlb_flush_pending(tlb->mm);
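To illustrate the consumer side of the batching described in the comment block above, a hedged sketch of an arch-level hook that feeds page-table pages into the mmu_gather batch instead of freeing them directly; the hook and helper names are modelled loosely on existing arm64-style code and are not part of this patch:

static inline void my_arch_pte_free_tlb(struct mmu_gather *tlb,
					struct page *pte_page,
					unsigned long addr)
{
	pgtable_pte_page_dtor(pte_page);
	/*
	 * Queue the table page: tlb_table_flush() invalidates caches first
	 * (only if tlb_needs_table_invalidate() says so) and then frees the
	 * whole batch, via call_rcu() under MMU_GATHER_RCU_TABLE_FREE or
	 * immediately when IPI-based TLB flushing already provides the
	 * synchronization, so lockless walkers such as gup_fast() never see
	 * a freed table.
	 */
	tlb_remove_table(tlb, pte_page);
}

The architecture is still expected to supply __tlb_remove_table() to perform the actual free once the batch is released.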
index 15e908a..3c4eb75 100644 (file)
@@ -5852,18 +5852,11 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 /* Skip PFNs that belong to non-present sections */
 static inline __meminit unsigned long next_pfn(unsigned long pfn)
 {
-       unsigned long section_nr;
+       const unsigned long section_nr = pfn_to_section_nr(++pfn);
 
-       section_nr = pfn_to_section_nr(++pfn);
        if (present_section_nr(section_nr))
                return pfn;
-
-       while (++section_nr <= __highest_present_section_nr) {
-               if (present_section_nr(section_nr))
-                       return section_nr_to_pfn(section_nr);
-       }
-
-       return -1;
+       return section_nr_to_pfn(next_present_section_nr(section_nr));
 }
 #else
 static inline __meminit unsigned long next_pfn(unsigned long pfn)
@@ -5905,18 +5898,20 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        }
 #endif
 
-       for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+       for (pfn = start_pfn; pfn < end_pfn; ) {
                /*
                 * There can be holes in boot-time mem_map[]s handed to this
                 * function.  They do not exist on hotplugged memory.
                 */
                if (context == MEMMAP_EARLY) {
                        if (!early_pfn_valid(pfn)) {
-                               pfn = next_pfn(pfn) - 1;
+                               pfn = next_pfn(pfn);
                                continue;
                        }
-                       if (!early_pfn_in_nid(pfn, nid))
+                       if (!early_pfn_in_nid(pfn, nid)) {
+                               pfn++;
                                continue;
+                       }
                        if (overlap_memmap_init(zone, &pfn))
                                continue;
                        if (defer_init(nid, pfn, end_pfn))
@@ -5944,16 +5939,17 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                        cond_resched();
                }
+               pfn++;
        }
 }
 
 #ifdef CONFIG_ZONE_DEVICE
 void __ref memmap_init_zone_device(struct zone *zone,
                                   unsigned long start_pfn,
-                                  unsigned long size,
+                                  unsigned long nr_pages,
                                   struct dev_pagemap *pgmap)
 {
-       unsigned long pfn, end_pfn = start_pfn + size;
+       unsigned long pfn, end_pfn = start_pfn + nr_pages;
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct vmem_altmap *altmap = pgmap_altmap(pgmap);
        unsigned long zone_idx = zone_idx(zone);
@@ -5970,7 +5966,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
         */
        if (altmap) {
                start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
-               size = end_pfn - start_pfn;
+               nr_pages = end_pfn - start_pfn;
        }
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
@@ -6017,7 +6013,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
        }
 
        pr_info("%s initialised %lu pages in %ums\n", __func__,
-               size, jiffies_to_msecs(jiffies - start));
+               nr_pages, jiffies_to_msecs(jiffies - start));
 }
 
 #endif
@@ -6916,10 +6912,10 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 
 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
- * Zero all valid struct pages in range [spfn, epfn), return number of struct
- * pages zeroed
+ * Initialize all valid struct pages in the range [spfn, epfn) and mark them
+ * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 {
        unsigned long pfn;
        u64 pgcnt = 0;
@@ -6930,7 +6926,13 @@ static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
                                + pageblock_nr_pages - 1;
                        continue;
                }
-               mm_zero_struct_page(pfn_to_page(pfn));
+               /*
+                * Use a fake node/zone (0) for now. Some of these pages
+                * (in memblock.reserved but not in memblock.memory) will
+                * get re-initialized via reserve_bootmem_region() later.
+                */
+               __init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+               __SetPageReserved(pfn_to_page(pfn));
                pgcnt++;
        }
 
@@ -6942,14 +6944,15 @@ static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
  * initialized by going through __init_single_page(). But, there are some
  * struct pages which are reserved in memblock allocator and their fields
  * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly zero those struct pages.
+ * flags). We must explicitly initialize those struct pages.
  *
  * This function also addresses a similar issue where struct pages are left
  * uninitialized because the physical address range is not covered by
  * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=.
+ * layout is manually configured via memmap=, or when the highest physical
+ * address (max_pfn) does not end on a section boundary.
  */
-void __init zero_resv_unavail(void)
+static void __init init_unavailable_mem(void)
 {
        phys_addr_t start, end;
        u64 i, pgcnt;
@@ -6962,10 +6965,20 @@ void __init zero_resv_unavail(void)
        for_each_mem_range(i, &memblock.memory, NULL,
                        NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
                if (next < start)
-                       pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
+                       pgcnt += init_unavailable_range(PFN_DOWN(next),
+                                                       PFN_UP(start));
                next = end;
        }
-       pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
+
+       /*
+        * Early sections always have a fully populated memmap for the whole
+        * section - see pfn_valid(). If the last section has holes at the
+        * end and that section is marked "online", the memmap will be
+        * considered initialized. Make sure that memmap has a well defined
+        * state.
+        */
+       pgcnt += init_unavailable_range(PFN_DOWN(next),
+                                       round_up(max_pfn, PAGES_PER_SECTION));
 
        /*
         * Struct pages that do not have backing memory. This could be because
@@ -6974,6 +6987,10 @@ void __init zero_resv_unavail(void)
        if (pgcnt)
                pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
 }
+#else
+static inline void __init init_unavailable_mem(void)
+{
+}
 #endif /* !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -7403,7 +7420,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
-       zero_resv_unavail();
+       init_unavailable_mem();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                free_area_init_node(nid, NULL,
@@ -7598,7 +7615,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
-       zero_resv_unavail();
+       init_unavailable_mem();
        free_area_init_node(0, zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
index ea0b9e6..928df16 100644 (file)
@@ -4,26 +4,57 @@
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
 
-static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-                         struct mm_walk *walk)
+/*
+ * We want to know the real level where an entry is located, ignoring any
+ * folding of levels which may be happening. For example if p4d is folded then
+ * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
+ */
+static int real_depth(int depth)
+{
+       if (depth == 3 && PTRS_PER_PMD == 1)
+               depth = 2;
+       if (depth == 2 && PTRS_PER_PUD == 1)
+               depth = 1;
+       if (depth == 1 && PTRS_PER_P4D == 1)
+               depth = 0;
+       return depth;
+}
+
+static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
+                               unsigned long end, struct mm_walk *walk)
 {
-       pte_t *pte;
-       int err = 0;
        const struct mm_walk_ops *ops = walk->ops;
-       spinlock_t *ptl;
+       int err = 0;
 
-       pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (;;) {
                err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                       break;
-               addr += PAGE_SIZE;
-               if (addr == end)
+               if (addr >= end - PAGE_SIZE)
                        break;
+               addr += PAGE_SIZE;
                pte++;
        }
+       return err;
+}
+
+static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+                         struct mm_walk *walk)
+{
+       pte_t *pte;
+       int err = 0;
+       spinlock_t *ptl;
+
+       if (walk->no_vma) {
+               pte = pte_offset_map(pmd, addr);
+               err = walk_pte_range_inner(pte, addr, end, walk);
+               pte_unmap(pte);
+       } else {
+               pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+               err = walk_pte_range_inner(pte, addr, end, walk);
+               pte_unmap_unlock(pte, ptl);
+       }
 
-       pte_unmap_unlock(pte, ptl);
        return err;
 }
 
@@ -34,18 +65,22 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;
+       int depth = real_depth(3);
 
        pmd = pmd_offset(pud, addr);
        do {
 again:
                next = pmd_addr_end(addr, end);
-               if (pmd_none(*pmd) || !walk->vma) {
+               if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
                        if (ops->pte_hole)
-                               err = ops->pte_hole(addr, next, walk);
+                               err = ops->pte_hole(addr, next, depth, walk);
                        if (err)
                                break;
                        continue;
                }
+
+               walk->action = ACTION_SUBTREE;
+
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
@@ -55,16 +90,24 @@ again:
                if (err)
                        break;
 
+               if (walk->action == ACTION_AGAIN)
+                       goto again;
+
                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
-               if (!ops->pte_entry)
+               if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
+                   walk->action == ACTION_CONTINUE ||
+                   !(ops->pte_entry))
                        continue;
 
-               split_huge_pmd(walk->vma, pmd, addr);
-               if (pmd_trans_unstable(pmd))
-                       goto again;
+               if (walk->vma) {
+                       split_huge_pmd(walk->vma, pmd, addr);
+                       if (pmd_trans_unstable(pmd))
+                               goto again;
+               }
+
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
@@ -80,37 +123,41 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;
+       int depth = real_depth(2);
 
        pud = pud_offset(p4d, addr);
        do {
  again:
                next = pud_addr_end(addr, end);
-               if (pud_none(*pud) || !walk->vma) {
+               if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
                        if (ops->pte_hole)
-                               err = ops->pte_hole(addr, next, walk);
+                               err = ops->pte_hole(addr, next, depth, walk);
                        if (err)
                                break;
                        continue;
                }
 
-               if (ops->pud_entry) {
-                       spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
+               walk->action = ACTION_SUBTREE;
 
-                       if (ptl) {
-                               err = ops->pud_entry(pud, addr, next, walk);
-                               spin_unlock(ptl);
-                               if (err)
-                                       break;
-                               continue;
-                       }
-               }
+               if (ops->pud_entry)
+                       err = ops->pud_entry(pud, addr, next, walk);
+               if (err)
+                       break;
+
+               if (walk->action == ACTION_AGAIN)
+                       goto again;
 
-               split_huge_pud(walk->vma, pud, addr);
+               if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
+                   walk->action == ACTION_CONTINUE ||
+                   !(ops->pmd_entry || ops->pte_entry))
+                       continue;
+
+               if (walk->vma)
+                       split_huge_pud(walk->vma, pud, addr);
                if (pud_none(*pud))
                        goto again;
 
-               if (ops->pmd_entry || ops->pte_entry)
-                       err = walk_pmd_range(pud, addr, next, walk);
+               err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);
@@ -125,18 +172,24 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;
+       int depth = real_depth(1);
 
        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d)) {
                        if (ops->pte_hole)
-                               err = ops->pte_hole(addr, next, walk);
+                               err = ops->pte_hole(addr, next, depth, walk);
                        if (err)
                                break;
                        continue;
                }
-               if (ops->pmd_entry || ops->pte_entry)
+               if (ops->p4d_entry) {
+                       err = ops->p4d_entry(p4d, addr, next, walk);
+                       if (err)
+                               break;
+               }
+               if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
                        err = walk_pud_range(p4d, addr, next, walk);
                if (err)
                        break;
@@ -153,17 +206,26 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;
 
-       pgd = pgd_offset(walk->mm, addr);
+       if (walk->pgd)
+               pgd = walk->pgd + pgd_index(addr);
+       else
+               pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (ops->pte_hole)
-                               err = ops->pte_hole(addr, next, walk);
+                               err = ops->pte_hole(addr, next, 0, walk);
                        if (err)
                                break;
                        continue;
                }
-               if (ops->pmd_entry || ops->pte_entry)
+               if (ops->pgd_entry) {
+                       err = ops->pgd_entry(pgd, addr, next, walk);
+                       if (err)
+                               break;
+               }
+               if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
+                   ops->pte_entry)
                        err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
@@ -199,7 +261,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                if (pte)
                        err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
                else if (ops->pte_hole)
-                       err = ops->pte_hole(addr, next, walk);
+                       err = ops->pte_hole(addr, next, -1, walk);
 
                if (err)
                        break;
@@ -243,7 +305,7 @@ static int walk_page_test(unsigned long start, unsigned long end,
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;
                if (ops->pte_hole)
-                       err = ops->pte_hole(start, end, walk);
+                       err = ops->pte_hole(start, end, -1, walk);
                return err ? err : 1;
        }
        return 0;
@@ -369,6 +431,33 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
        return err;
 }
 
+/*
+ * Similar to walk_page_range() but can walk any page tables even if they are
+ * not backed by VMAs. Because 'unusual' entries may be walked, this function
+ * will also not lock the PTEs for the pte_entry() callback. This is useful for
+ * walking the kernel page tables or page tables for firmware.
+ */
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+                         unsigned long end, const struct mm_walk_ops *ops,
+                         pgd_t *pgd,
+                         void *private)
+{
+       struct mm_walk walk = {
+               .ops            = ops,
+               .mm             = mm,
+               .pgd            = pgd,
+               .private        = private,
+               .no_vma         = true
+       };
+
+       if (start >= end || !walk.mm)
+               return -EINVAL;
+
+       lockdep_assert_held(&walk.mm->mmap_sem);
+
+       return __walk_page_range(start, end, &walk);
+}
+
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                void *private)
 {
diff --git a/mm/ptdump.c b/mm/ptdump.c
new file mode 100644 (file)
index 0000000..26208d0
--- /dev/null
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pagewalk.h>
+#include <linux/ptdump.h>
+#include <linux/kasan.h>
+
+#ifdef CONFIG_KASAN
+/*
+ * This is an optimization for KASAN=y case. Since all kasan page tables
+ * eventually point to the kasan_early_shadow_page we could call note_page()
+ * right away without walking through lower level page tables. This saves
+ * us dozens of seconds (minutes for 5-level config) while checking for
+ * W+X mapping or reading kernel_page_tables debugfs file.
+ */
+static inline int note_kasan_page_table(struct mm_walk *walk,
+                                       unsigned long addr)
+{
+       struct ptdump_state *st = walk->private;
+
+       st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));
+
+       walk->action = ACTION_CONTINUE;
+
+       return 0;
+}
+#endif
+
+static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
+                           unsigned long next, struct mm_walk *walk)
+{
+       struct ptdump_state *st = walk->private;
+       pgd_t val = READ_ONCE(*pgd);
+
+#if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN)
+       if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
+               return note_kasan_page_table(walk, addr);
+#endif
+
+       if (pgd_leaf(val))
+               st->note_page(st, addr, 0, pgd_val(val));
+
+       return 0;
+}
+
+static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
+                           unsigned long next, struct mm_walk *walk)
+{
+       struct ptdump_state *st = walk->private;
+       p4d_t val = READ_ONCE(*p4d);
+
+#if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN)
+       if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
+               return note_kasan_page_table(walk, addr);
+#endif
+
+       if (p4d_leaf(val))
+               st->note_page(st, addr, 1, p4d_val(val));
+
+       return 0;
+}
+
+static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
+                           unsigned long next, struct mm_walk *walk)
+{
+       struct ptdump_state *st = walk->private;
+       pud_t val = READ_ONCE(*pud);
+
+#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN)
+       if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
+               return note_kasan_page_table(walk, addr);
+#endif
+
+       if (pud_leaf(val))
+               st->note_page(st, addr, 2, pud_val(val));
+
+       return 0;
+}
+
+static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
+                           unsigned long next, struct mm_walk *walk)
+{
+       struct ptdump_state *st = walk->private;
+       pmd_t val = READ_ONCE(*pmd);
+
+#if defined(CONFIG_KASAN)
+       if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
+               return note_kasan_page_table(walk, addr);
+#endif
+
+       if (pmd_leaf(val))
+               st->note_page(st, addr, 3, pmd_val(val));
+
+       return 0;
+}
+
+static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
+                           unsigned long next, struct mm_walk *walk)
+{
+       struct ptdump_state *st = walk->private;
+
+       st->note_page(st, addr, 4, pte_val(READ_ONCE(*pte)));
+
+       return 0;
+}
+
+static int ptdump_hole(unsigned long addr, unsigned long next,
+                      int depth, struct mm_walk *walk)
+{
+       struct ptdump_state *st = walk->private;
+
+       st->note_page(st, addr, depth, 0);
+
+       return 0;
+}
+
+static const struct mm_walk_ops ptdump_ops = {
+       .pgd_entry      = ptdump_pgd_entry,
+       .p4d_entry      = ptdump_p4d_entry,
+       .pud_entry      = ptdump_pud_entry,
+       .pmd_entry      = ptdump_pmd_entry,
+       .pte_entry      = ptdump_pte_entry,
+       .pte_hole       = ptdump_hole,
+};
+
+void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
+{
+       const struct ptdump_range *range = st->range;
+
+       down_read(&mm->mmap_sem);
+       while (range->start != range->end) {
+               walk_page_range_novma(mm, range->start, range->end,
+                                     &ptdump_ops, pgd, st);
+               range++;
+       }
+       up_read(&mm->mmap_sem);
+
+       /* Flush out the last page */
+       st->note_page(st, 0, -1, 0);
+}
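And a hedged sketch of how an architecture might wire the generic walker above into its own dumper; the note_page() body, the printed format and the range table are illustrative assumptions (real users plug this into their debugfs file or W+X checks):

#include <linux/mm.h>
#include <linux/ptdump.h>

static void my_note_page(struct ptdump_state *st, unsigned long addr,
			 int level, unsigned long val)
{
	/* level -1 is the final flush call, 0..4 map to pgd..pte, holes pass val == 0 */
	if (level >= 0 && val)
		pr_info("0x%016lx: level %d entry %#lx\n", addr, level, val);
}

static const struct ptdump_range my_ranges[] = {
	{ PAGE_OFFSET, ~0UL },		/* assumed: dump from the linear map upwards */
	{ 0, 0 }			/* terminator: start == end stops the loop */
};

static void my_ptdump_show(void)
{
	struct ptdump_state st = {
		.note_page	= my_note_page,
		.range		= my_ranges,
	};

	ptdump_walk_pgd(&st, &init_mm, NULL);	/* NULL: walk init_mm's own pgd */
}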
index 0d95dde..1907cb2 100644 (file)
@@ -1580,18 +1580,17 @@ static int slabinfo_open(struct inode *inode, struct file *file)
        return seq_open(file, &slabinfo_op);
 }
 
-static const struct file_operations proc_slabinfo_operations = {
-       .open           = slabinfo_open,
-       .read           = seq_read,
-       .write          = slabinfo_write,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
+static const struct proc_ops slabinfo_proc_ops = {
+       .proc_open      = slabinfo_open,
+       .proc_read      = seq_read,
+       .proc_write     = slabinfo_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 static int __init slab_proc_init(void)
 {
-       proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
-                                               &proc_slabinfo_operations);
+       proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
        return 0;
 }
 module_init(slab_proc_init);
@@ -1676,28 +1675,6 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
        return ret;
 }
 
-/**
- * __krealloc - like krealloc() but don't free @p.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * This function is like krealloc() except it never frees the originally
- * allocated buffer. Use this if you don't want to free the buffer immediately
- * like, for example, with RCU.
- *
- * Return: pointer to the allocated memory or %NULL in case of error
- */
-void *__krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-       if (unlikely(!new_size))
-               return ZERO_SIZE_PTR;
-
-       return __do_krealloc(p, new_size, flags);
-
-}
-EXPORT_SYMBOL(__krealloc);
-
 /**
  * krealloc - reallocate memory. The contents will remain unchanged.
  * @p: object to reallocate memory for.
index 3918fc3..c184b69 100644 (file)
@@ -198,16 +198,6 @@ static void section_mark_present(struct mem_section *ms)
        ms->section_mem_map |= SECTION_MARKED_PRESENT;
 }
 
-static inline unsigned long next_present_section_nr(unsigned long section_nr)
-{
-       do {
-               section_nr++;
-               if (present_section_nr(section_nr))
-                       return section_nr;
-       } while ((section_nr <= __highest_present_section_nr));
-
-       return -1;
-}
 #define for_each_present_section_nr(start, section_nr)         \
        for (section_nr = next_present_section_nr(start-1);     \
             ((section_nr != -1) &&                             \
index 6febae9..2c33ff4 100644 (file)
@@ -2796,17 +2796,17 @@ static int swaps_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations proc_swaps_operations = {
-       .open           = swaps_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
-       .poll           = swaps_poll,
+static const struct proc_ops swaps_proc_ops = {
+       .proc_open      = swaps_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
+       .proc_poll      = swaps_poll,
 };
 
 static int __init procswaps_init(void)
 {
-       proc_create("swaps", 0, NULL, &proc_swaps_operations);
+       proc_create("swaps", 0, NULL, &swaps_proc_ops);
        return 0;
 }
 __initcall(procswaps_init);
index 46d6cd9..829db9e 100644 (file)
@@ -53,15 +53,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
 
 static int parse_qos(const char *buff);
 
-/*
- *   Define allowed FILE OPERATIONS
- */
-static const struct file_operations mpc_file_operations = {
-       .open =         proc_mpc_open,
-       .read =         seq_read,
-       .llseek =       seq_lseek,
-       .write =        proc_mpc_write,
-       .release =      seq_release,
+static const struct proc_ops mpc_proc_ops = {
+       .proc_open      = proc_mpc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = proc_mpc_write,
+       .proc_release   = seq_release,
 };
 
 /*
@@ -290,7 +287,7 @@ int mpc_proc_init(void)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
+       p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_proc_ops);
        if (!p) {
                pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
                return -ENOMEM;
index c318967..4369ffa 100644 (file)
@@ -36,9 +36,9 @@
 static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *pos);
 
-static const struct file_operations proc_atm_dev_ops = {
-       .read =         proc_dev_atm_read,
-       .llseek =       noop_llseek,
+static const struct proc_ops atm_dev_proc_ops = {
+       .proc_read      = proc_dev_atm_read,
+       .proc_lseek     = noop_llseek,
 };
 
 static void add_stats(struct seq_file *seq, const char *aal,
@@ -359,7 +359,7 @@ int atm_proc_dev_register(struct atm_dev *dev)
                goto err_out;
 
        dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
-                                          &proc_atm_dev_ops, dev);
+                                          &atm_dev_proc_ops, dev);
        if (!dev->proc_entry)
                goto err_free_name;
        return 0;
index 59d0ba2..ce09bb4 100644 (file)
@@ -13,5 +13,5 @@ libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
        auth.o auth_none.o \
        crypto.o armor.o \
        auth_x.o \
-       ceph_fs.o ceph_strings.o ceph_hash.o \
+       ceph_strings.o ceph_hash.o \
        pagevec.o snapshot.o string_table.o
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
deleted file mode 100644 (file)
index 756a2dc..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Some non-inline ceph helpers
- */
-#include <linux/module.h>
-#include <linux/ceph/types.h>
-
-/*
- * return true if @layout appears to be valid
- */
-int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
-{
-       __u32 su = layout->stripe_unit;
-       __u32 sc = layout->stripe_count;
-       __u32 os = layout->object_size;
-
-       /* stripe unit, object size must be non-zero, 64k increment */
-       if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
-               return 0;
-       if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
-               return 0;
-       /* object size must be a multiple of stripe unit */
-       if (os < su || os % su)
-               return 0;
-       /* stripe count must be non-zero */
-       if (!sc)
-               return 0;
-       return 1;
-}
-
-void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
-                                 struct ceph_file_layout_legacy *legacy)
-{
-       fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit);
-       fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
-       fl->object_size = le32_to_cpu(legacy->fl_object_size);
-       fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
-       if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
-           fl->stripe_count == 0 && fl->object_size == 0)
-               fl->pool_id = -1;
-}
-EXPORT_SYMBOL(ceph_file_layout_from_legacy);
-
-void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
-                               struct ceph_file_layout_legacy *legacy)
-{
-       legacy->fl_stripe_unit = cpu_to_le32(fl->stripe_unit);
-       legacy->fl_stripe_count = cpu_to_le32(fl->stripe_count);
-       legacy->fl_object_size = cpu_to_le32(fl->object_size);
-       if (fl->pool_id >= 0)
-               legacy->fl_pg_pool = cpu_to_le32(fl->pool_id);
-       else
-               legacy->fl_pg_pool = 0;
-}
-EXPORT_SYMBOL(ceph_file_layout_to_legacy);
-
-int ceph_flags_to_mode(int flags)
-{
-       int mode;
-
-#ifdef O_DIRECTORY  /* fixme */
-       if ((flags & O_DIRECTORY) == O_DIRECTORY)
-               return CEPH_FILE_MODE_PIN;
-#endif
-
-       switch (flags & O_ACCMODE) {
-       case O_WRONLY:
-               mode = CEPH_FILE_MODE_WR;
-               break;
-       case O_RDONLY:
-               mode = CEPH_FILE_MODE_RD;
-               break;
-       case O_RDWR:
-       case O_ACCMODE: /* this is what the VFS does */
-               mode = CEPH_FILE_MODE_RDWR;
-               break;
-       }
-#ifdef O_LAZY
-       if (flags & O_LAZY)
-               mode |= CEPH_FILE_MODE_LAZY;
-#endif
-
-       return mode;
-}
-EXPORT_SYMBOL(ceph_flags_to_mode);
-
-int ceph_caps_for_mode(int mode)
-{
-       int caps = CEPH_CAP_PIN;
-
-       if (mode & CEPH_FILE_MODE_RD)
-               caps |= CEPH_CAP_FILE_SHARED |
-                       CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
-       if (mode & CEPH_FILE_MODE_WR)
-               caps |= CEPH_CAP_FILE_EXCL |
-                       CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
-                       CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
-                       CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
-       if (mode & CEPH_FILE_MODE_LAZY)
-               caps |= CEPH_CAP_FILE_LAZYIO;
-
-       return caps;
-}
-EXPORT_SYMBOL(ceph_caps_for_mode);
index ba45b07..b68b376 100644 (file)
@@ -402,7 +402,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
-       case CEPH_OSD_OP_COPY_FROM:
+       case CEPH_OSD_OP_COPY_FROM2:
                ceph_osd_data_release(&op->copy_from.osd_data);
                break;
        default:
@@ -697,7 +697,7 @@ static void get_num_data_items(struct ceph_osd_request *req,
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                case CEPH_OSD_OP_NOTIFY_ACK:
-               case CEPH_OSD_OP_COPY_FROM:
+               case CEPH_OSD_OP_COPY_FROM2:
                        *num_request_data_items += 1;
                        break;
 
@@ -1029,7 +1029,7 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
-       case CEPH_OSD_OP_COPY_FROM:
+       case CEPH_OSD_OP_COPY_FROM2:
                dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
                dst->copy_from.src_version =
                        cpu_to_le64(src->copy_from.src_version);
@@ -1966,7 +1966,7 @@ static void setup_request_data(struct ceph_osd_request *req)
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->notify_ack.request_data);
                        break;
-               case CEPH_OSD_OP_COPY_FROM:
+               case CEPH_OSD_OP_COPY_FROM2:
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->copy_from.osd_data);
                        break;
@@ -5315,6 +5315,7 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
                                     struct ceph_object_locator *src_oloc,
                                     u32 src_fadvise_flags,
                                     u32 dst_fadvise_flags,
+                                    u32 truncate_seq, u64 truncate_size,
                                     u8 copy_from_flags)
 {
        struct ceph_osd_req_op *op;
@@ -5325,7 +5326,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
-       op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
+       op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
+                             dst_fadvise_flags);
        op->copy_from.snapid = src_snapid;
        op->copy_from.src_version = src_version;
        op->copy_from.flags = copy_from_flags;
@@ -5335,6 +5337,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
        end = p + PAGE_SIZE;
        ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
        encode_oloc(&p, end, src_oloc);
+       ceph_encode_32(&p, truncate_seq);
+       ceph_encode_64(&p, truncate_size);
        op->indata_len = PAGE_SIZE - (end - p);
 
        ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
@@ -5350,6 +5354,7 @@ int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
                        struct ceph_object_id *dst_oid,
                        struct ceph_object_locator *dst_oloc,
                        u32 dst_fadvise_flags,
+                       u32 truncate_seq, u64 truncate_size,
                        u8 copy_from_flags)
 {
        struct ceph_osd_request *req;
@@ -5366,7 +5371,8 @@ int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
 
        ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
                                        src_oloc, src_fadvise_flags,
-                                       dst_fadvise_flags, copy_from_flags);
+                                       dst_fadvise_flags, truncate_seq,
+                                       truncate_size, copy_from_flags);
        if (ret)
                goto out;
 
index 17529d4..a69e8bd 100644 (file)
@@ -5792,7 +5792,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (&ptype->list == head)
                goto normal;
 
-       if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
+       if (PTR_ERR(pp) == -EINPROGRESS) {
                ret = GRO_CONSUMED;
                goto ok;
        }
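
The dropped IS_ERR() test is redundant rather than load-bearing: PTR_ERR() is only a cast, and a valid pointer can never compare equal to a small negative errno. Simplified versions of the helpers (after include/linux/err.h), for reference only:

#define MAX_ERRNO       4095

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;       /* plain cast, defined for any pointer */
}

static inline bool IS_ERR(const void *ptr)
{
        /* error cookies live in the top MAX_ERRNO addresses */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/*
 * PTR_ERR(pp) == -EINPROGRESS is therefore only true for the
 * ERR_PTR(-EINPROGRESS) cookie, which IS_ERR() accepts anyway,
 * so "IS_ERR(pp) && ..." collapses to the single comparison.
 */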
index 792e374..c180871 100644 (file)
@@ -1573,7 +1573,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
                return -EPERM;
 
        prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
-       if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
+       if (PTR_ERR(prog) == -EINVAL)
                prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
        if (IS_ERR(prog))
                return PTR_ERR(prog);
index 294bfcf..acc849d 100644 (file)
@@ -535,12 +535,12 @@ static int pgctrl_open(struct inode *inode, struct file *file)
        return single_open(file, pgctrl_show, PDE_DATA(inode));
 }
 
-static const struct file_operations pktgen_fops = {
-       .open    = pgctrl_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .write   = pgctrl_write,
-       .release = single_release,
+static const struct proc_ops pktgen_proc_ops = {
+       .proc_open      = pgctrl_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = pgctrl_write,
+       .proc_release   = single_release,
 };
 
 static int pktgen_if_show(struct seq_file *seq, void *v)
@@ -1707,12 +1707,12 @@ static int pktgen_if_open(struct inode *inode, struct file *file)
        return single_open(file, pktgen_if_show, PDE_DATA(inode));
 }
 
-static const struct file_operations pktgen_if_fops = {
-       .open    = pktgen_if_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .write   = pktgen_if_write,
-       .release = single_release,
+static const struct proc_ops pktgen_if_proc_ops = {
+       .proc_open      = pktgen_if_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = pktgen_if_write,
+       .proc_release   = single_release,
 };
 
 static int pktgen_thread_show(struct seq_file *seq, void *v)
@@ -1844,12 +1844,12 @@ static int pktgen_thread_open(struct inode *inode, struct file *file)
        return single_open(file, pktgen_thread_show, PDE_DATA(inode));
 }
 
-static const struct file_operations pktgen_thread_fops = {
-       .open    = pktgen_thread_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .write   = pktgen_thread_write,
-       .release = single_release,
+static const struct proc_ops pktgen_thread_proc_ops = {
+       .proc_open      = pktgen_thread_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_write     = pktgen_thread_write,
+       .proc_release   = single_release,
 };
 
 /* Think find or remove for NN */
@@ -1926,7 +1926,7 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 
                        pkt_dev->entry = proc_create_data(dev->name, 0600,
                                                          pn->proc_dir,
-                                                         &pktgen_if_fops,
+                                                         &pktgen_if_proc_ops,
                                                          pkt_dev);
                        if (!pkt_dev->entry)
                                pr_err("can't move proc entry for '%s'\n",
@@ -3638,7 +3638,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
                pkt_dev->clone_skb = pg_clone_skb_d;
 
        pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
-                                         &pktgen_if_fops, pkt_dev);
+                                         &pktgen_if_proc_ops, pkt_dev);
        if (!pkt_dev->entry) {
                pr_err("cannot create %s/%s procfs entry\n",
                       PG_PROC_DIR, ifname);
@@ -3708,7 +3708,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
        t->tsk = p;
 
        pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
-                             &pktgen_thread_fops, t);
+                             &pktgen_thread_proc_ops, t);
        if (!pe) {
                pr_err("cannot create %s/%s procfs entry\n",
                       PG_PROC_DIR, t->tsk->comm);
@@ -3793,7 +3793,7 @@ static int __net_init pg_net_init(struct net *net)
                pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
                return -ENODEV;
        }
-       pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_fops);
+       pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_proc_ops);
        if (pe == NULL) {
                pr_err("cannot create %s procfs entry\n", PGCTRL);
                ret = -EINVAL;
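
The pktgen hunks above are a mechanical conversion from struct file_operations to the proc-only struct proc_ops (smaller ops table, no .owner field, proc_* method names). A generic sketch of the new-style registration, using a hypothetical "demo" entry rather than anything from pktgen:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello\n");
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, PDE_DATA(inode));
}

static const struct proc_ops demo_proc_ops = {
        .proc_open      = demo_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
};

/* registration, e.g. from module init:
 *      proc_create_data("demo", 0444, parent_dir, &demo_proc_ops, data);
 */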
index ee56129..fbfd0db 100644 (file)
@@ -27,6 +27,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
 
        rcu_read_lock(); /* hsr->node_db, hsr->ports */
        port = hsr_port_get_rcu(skb->dev);
+       if (!port)
+               goto finish_pass;
 
        if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
                /* Directly kill frames sent by ourselves */
index f35308f..4438f6b 100644 (file)
@@ -1334,7 +1334,7 @@ static int __init ipconfig_proc_net_init(void)
 
 /* Create a new file under /proc/net/ipconfig */
 static int ipconfig_proc_net_create(const char *name,
-                                   const struct file_operations *fops)
+                                   const struct proc_ops *proc_ops)
 {
        char *pname;
        struct proc_dir_entry *p;
@@ -1346,7 +1346,7 @@ static int ipconfig_proc_net_create(const char *name,
        if (!pname)
                return -ENOMEM;
 
-       p = proc_create(pname, 0444, init_net.proc_net, fops);
+       p = proc_create(pname, 0444, init_net.proc_net, proc_ops);
        kfree(pname);
        if (!p)
                return -ENOMEM;
@@ -1355,7 +1355,7 @@ static int ipconfig_proc_net_create(const char *name,
 }
 
 /* Write NTP server IP addresses to /proc/net/ipconfig/ntp_servers */
-static int ntp_servers_seq_show(struct seq_file *seq, void *v)
+static int ntp_servers_show(struct seq_file *seq, void *v)
 {
        int i;
 
@@ -1365,7 +1365,7 @@ static int ntp_servers_seq_show(struct seq_file *seq, void *v)
        }
        return 0;
 }
-DEFINE_SHOW_ATTRIBUTE(ntp_servers_seq);
+DEFINE_PROC_SHOW_ATTRIBUTE(ntp_servers);
 #endif /* CONFIG_PROC_FS */
 
 /*
@@ -1456,7 +1456,7 @@ static int __init ip_auto_config(void)
        proc_create_single("pnp", 0444, init_net.proc_net, pnp_seq_show);
 
        if (ipconfig_proc_net_init() == 0)
-               ipconfig_proc_net_create("ntp_servers", &ntp_servers_seq_fops);
+               ipconfig_proc_net_create("ntp_servers", &ntp_servers_proc_ops);
 #endif /* CONFIG_PROC_FS */
 
        if (!ic_enable)
index 6bdb1ab..f8755a4 100644 (file)
@@ -58,7 +58,7 @@ struct clusterip_config {
 };
 
 #ifdef CONFIG_PROC_FS
-static const struct file_operations clusterip_proc_fops;
+static const struct proc_ops clusterip_proc_ops;
 #endif
 
 struct clusterip_net {
@@ -280,7 +280,7 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
                mutex_lock(&cn->mutex);
                c->pde = proc_create_data(buffer, 0600,
                                          cn->procdir,
-                                         &clusterip_proc_fops, c);
+                                         &clusterip_proc_ops, c);
                mutex_unlock(&cn->mutex);
                if (!c->pde) {
                        err = -ENOMEM;
@@ -804,12 +804,12 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
        return size;
 }
 
-static const struct file_operations clusterip_proc_fops = {
-       .open    = clusterip_proc_open,
-       .read    = seq_read,
-       .write   = clusterip_proc_write,
-       .llseek  = seq_lseek,
-       .release = clusterip_proc_release,
+static const struct proc_ops clusterip_proc_ops = {
+       .proc_open      = clusterip_proc_open,
+       .proc_read      = seq_read,
+       .proc_write     = clusterip_proc_write,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = clusterip_proc_release,
 };
 
 #endif /* CONFIG_PROC_FS */
index d5c57b3..ebe7060 100644 (file)
@@ -237,11 +237,11 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, &rt_cache_seq_ops);
 }
 
-static const struct file_operations rt_cache_seq_fops = {
-       .open    = rt_cache_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
+static const struct proc_ops rt_cache_proc_ops = {
+       .proc_open      = rt_cache_seq_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 
@@ -328,11 +328,11 @@ static int rt_cpu_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, &rt_cpu_seq_ops);
 }
 
-static const struct file_operations rt_cpu_seq_fops = {
-       .open    = rt_cpu_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
+static const struct proc_ops rt_cpu_proc_ops = {
+       .proc_open      = rt_cpu_seq_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
 };
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
@@ -366,12 +366,12 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
        struct proc_dir_entry *pde;
 
        pde = proc_create("rt_cache", 0444, net->proc_net,
-                         &rt_cache_seq_fops);
+                         &rt_cache_proc_ops);
        if (!pde)
                goto err1;
 
        pde = proc_create("rt_cache", 0444,
-                         net->proc_net_stat, &rt_cpu_seq_fops);
+                         net->proc_net_stat, &rt_cpu_proc_ops);
        if (!pde)
                goto err2;
 
index 484485a..eb2d805 100644 (file)
@@ -2622,10 +2622,12 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->snd_cwnd = TCP_INIT_CWND;
        tp->snd_cwnd_cnt = 0;
        tp->window_clamp = 0;
+       tp->delivered = 0;
        tp->delivered_ce = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
        tp->is_sack_reneg = 0;
        tcp_clear_retrans(tp);
+       tp->total_retrans = 0;
        inet_csk_delack_init(sk);
        /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
         * issue in __tcp_select_window()
@@ -2637,10 +2639,14 @@ int tcp_disconnect(struct sock *sk, int flags)
        sk->sk_rx_dst = NULL;
        tcp_saved_syn_free(tp);
        tp->compressed_ack = 0;
+       tp->segs_in = 0;
+       tp->segs_out = 0;
        tp->bytes_sent = 0;
        tp->bytes_acked = 0;
        tp->bytes_received = 0;
        tp->bytes_retrans = 0;
+       tp->data_segs_in = 0;
+       tp->data_segs_out = 0;
        tp->duplicate_sack[0].start_seq = 0;
        tp->duplicate_sack[0].end_seq = 0;
        tp->dsack_dups = 0;
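
These resets matter because a socket disconnected with connect(AF_UNSPEC) can be reused, and the counters above feed struct tcp_info; without zeroing them the new connection would report the previous connection's retransmit and segment statistics. A userspace illustration (hypothetical helper, not from the patch):

#include <netinet/tcp.h>
#include <sys/socket.h>

/* After the fix, a disconnected-and-reconnected socket starts these
 * tcp_info counters from zero again. */
static long long total_retrans(int fd)
{
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
                return -1;
        return ti.tcpi_total_retrans;
}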
index e325b45..316ebdf 100644 (file)
@@ -5908,8 +5908,14 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 *        the segment and return)"
                 */
                if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
-                   after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
+                   after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
+                       /* Previous FIN/ACK or RST/ACK might be ignored. */
+                       if (icsk->icsk_retransmits == 0)
+                               inet_csk_reset_xmit_timer(sk,
+                                               ICSK_TIME_RETRANS,
+                                               TCP_TIMEOUT_MIN, TCP_RTO_MAX);
                        goto reset_and_undo;
+               }
 
                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
index c99223c..fcb53ed 100644 (file)
@@ -320,8 +320,13 @@ int l2tp_session_register(struct l2tp_session *session,
 
                spin_lock_bh(&pn->l2tp_session_hlist_lock);
 
+               /* IP encap expects session IDs to be globally unique, while
+                * UDP encap doesn't.
+                */
                hlist_for_each_entry(session_walk, g_head, global_hlist)
-                       if (session_walk->session_id == session->session_id) {
+                       if (session_walk->session_id == session->session_id &&
+                           (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
+                            tunnel->encap == L2TP_ENCAPTYPE_IP)) {
                                err = -EEXIST;
                                goto err_tlock_pnlock;
                        }
index cf895bc..69c107f 100644 (file)
@@ -1483,31 +1483,34 @@ ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
 };
 
 static int
-dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
+ip_set_dump_start(struct netlink_callback *cb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
        int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
        struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
        struct nlattr *attr = (void *)nlh + min_len;
+       struct sk_buff *skb = cb->skb;
+       struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
        u32 dump_type;
-       ip_set_id_t index;
        int ret;
 
        ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
                        nlh->nlmsg_len - min_len,
                        ip_set_dump_policy, NULL);
        if (ret)
-               return ret;
+               goto error;
 
        cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
        if (cda[IPSET_ATTR_SETNAME]) {
+               ip_set_id_t index;
                struct ip_set *set;
 
                set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
                                      &index);
-               if (!set)
-                       return -ENOENT;
-
+               if (!set) {
+                       ret = -ENOENT;
+                       goto error;
+               }
                dump_type = DUMP_ONE;
                cb->args[IPSET_CB_INDEX] = index;
        } else {
@@ -1523,10 +1526,17 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
        cb->args[IPSET_CB_DUMP] = dump_type;
 
        return 0;
+
+error:
+       /* We have to create and send the error message manually :-( */
+       if (nlh->nlmsg_flags & NLM_F_ACK) {
+               netlink_ack(cb->skb, nlh, ret, NULL);
+       }
+       return ret;
 }
 
 static int
-ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
 {
        ip_set_id_t index = IPSET_INVALID_ID, max;
        struct ip_set *set = NULL;
@@ -1537,18 +1547,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
        bool is_destroyed;
        int ret = 0;
 
-       if (!cb->args[IPSET_CB_DUMP]) {
-               ret = dump_init(cb, inst);
-               if (ret < 0) {
-                       nlh = nlmsg_hdr(cb->skb);
-                       /* We have to create and send the error message
-                        * manually :-(
-                        */
-                       if (nlh->nlmsg_flags & NLM_F_ACK)
-                               netlink_ack(cb->skb, nlh, ret, NULL);
-                       return ret;
-               }
-       }
+       if (!cb->args[IPSET_CB_DUMP])
+               return -EINVAL;
 
        if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
                goto out;
@@ -1684,7 +1684,8 @@ static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 
        {
                struct netlink_dump_control c = {
-                       .dump = ip_set_dump_start,
+                       .start = ip_set_dump_start,
+                       .dump = ip_set_dump_do,
                        .done = ip_set_dump_done,
                };
                return netlink_dump_start(ctnl, skb, nlh, &c);
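
Splitting the old combined handler into ip_set_dump_start()/ip_set_dump_do() leans on the netlink core's .start callback, which runs exactly once while the dump is being set up; on a .start error the dump is torn down without .dump ever being called, which is why ip_set_dump_do() can now simply return -EINVAL if the parsed state is missing. A sketch of the control structure (callback names hypothetical):

struct netlink_dump_control c = {
        .start  = my_dump_start,        /* once: parse attrs, stash in cb->args[] */
        .dump   = my_dump_do,           /* repeatedly: fill one skb per call      */
        .done   = my_dump_done,         /* cleanup for a dump that actually ran   */
};
err = netlink_dump_start(nlsk, skb, nlh, &c);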
index f4c4b46..d130542 100644 (file)
@@ -2248,8 +2248,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
 
-       hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
-                             GFP_KERNEL | __GFP_ZERO);
+       hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
 
        if (hash && nulls)
                for (i = 0; i < nr_slots; i++)
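
kvcalloc() is just the zeroing wrapper around kvmalloc_array(), so this hunk and the matching one in xt_alloc_entry_offsets() below change no behaviour; an equivalence sketch, for reference:

/* Both requests are identical: nr_slots zeroed elements, kmalloc first,
 * vmalloc fallback for large tables, released with kvfree(). */
hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
                      GFP_KERNEL | __GFP_ZERO);
hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);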
index 7e91989..8af28e1 100644 (file)
@@ -529,9 +529,9 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
                                          struct net_device *dev)
 {
-       nf_flow_table_offload_flush(flowtable);
        nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
        flush_delayed_work(&flowtable->gc_work);
+       nf_flow_table_offload_flush(flowtable);
 }
 
 void nf_flow_table_cleanup(struct net_device *dev)
@@ -553,6 +553,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
        cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
+       nf_flow_table_offload_flush(flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
index c8b70ff..83e1db3 100644 (file)
@@ -675,6 +675,7 @@ static void flow_offload_work_del(struct flow_offload_work *offload)
 {
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+       set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
 }
 
 static void flow_offload_tuple_stats(struct flow_offload_work *offload,
index ce70c25..e27c6c5 100644 (file)
@@ -939,14 +939,14 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
  *
  * @size: number of entries
  *
- * Return: NULL or kmalloc'd or vmalloc'd array
+ * Return: NULL or zeroed kmalloc'd or vmalloc'd array
  */
 unsigned int *xt_alloc_entry_offsets(unsigned int size)
 {
        if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
                return NULL;
 
-       return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
+       return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
 
 }
 EXPORT_SYMBOL(xt_alloc_entry_offsets);
index 781e0b4..0a97080 100644 (file)
@@ -103,7 +103,7 @@ static DEFINE_SPINLOCK(recent_lock);
 static DEFINE_MUTEX(recent_mutex);
 
 #ifdef CONFIG_PROC_FS
-static const struct file_operations recent_mt_fops;
+static const struct proc_ops recent_mt_proc_ops;
 #endif
 
 static u_int32_t hash_rnd __read_mostly;
@@ -405,7 +405,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
                goto out;
        }
        pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
-                 &recent_mt_fops, t);
+                              &recent_mt_proc_ops, t);
        if (pde == NULL) {
                recent_table_free(t);
                ret = -ENOMEM;
@@ -616,13 +616,12 @@ recent_mt_proc_write(struct file *file, const char __user *input,
        return size + 1;
 }
 
-static const struct file_operations recent_mt_fops = {
-       .open    = recent_seq_open,
-       .read    = seq_read,
-       .write   = recent_mt_proc_write,
-       .release = seq_release_private,
-       .owner   = THIS_MODULE,
-       .llseek = seq_lseek,
+static const struct proc_ops recent_mt_proc_ops = {
+       .proc_open      = recent_seq_open,
+       .proc_read      = seq_read,
+       .proc_write     = recent_mt_proc_write,
+       .proc_release   = seq_release_private,
+       .proc_lseek     = seq_lseek,
 };
 
 static int __net_init recent_proc_net_init(struct net *net)
index 9d3c4d2..fe42f98 100644 (file)
@@ -194,6 +194,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 service_in_use:
        write_unlock(&local->services_lock);
        rxrpc_unuse_local(local);
+       rxrpc_put_local(local);
        ret = -EADDRINUSE;
 error_unlock:
        release_sock(&rx->sk);
@@ -899,6 +900,7 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_purge_queue(&sk->sk_receive_queue);
 
        rxrpc_unuse_local(rx->local);
+       rxrpc_put_local(rx->local);
        rx->local = NULL;
        key_put(rx->key);
        rx->key = NULL;
index 5e99df8..7d730c4 100644 (file)
@@ -490,6 +490,7 @@ enum rxrpc_call_flag {
        RXRPC_CALL_RX_HEARD,            /* The peer responded at least once to this call */
        RXRPC_CALL_RX_UNDERRUN,         /* Got data underrun */
        RXRPC_CALL_IS_INTR,             /* The call is interruptible */
+       RXRPC_CALL_DISCONNECTED,        /* The call has been disconnected */
 };
 
 /*
@@ -1021,6 +1022,16 @@ void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
+static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
+{
+       return atomic_dec_return(&local->active_users) == 0;
+}
+
+static inline bool __rxrpc_use_local(struct rxrpc_local *local)
+{
+       return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
+}
+
 /*
  * misc.c
  */
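
The two inline helpers give rxrpc a conditional "active user" pin on a local endpoint: __rxrpc_use_local() only succeeds while the count is still non-zero, so nothing new can start once teardown has begun, and __rxrpc_unuse_local() reports when the last user goes away. A usage sketch mirroring how the later conn_event.c hunk applies them (do_some_work() is hypothetical):

if (__rxrpc_use_local(local)) {
        /* endpoint pinned: safe to use its socket */
        do_some_work(local);
        rxrpc_unuse_local(local);       /* may queue the local processor */
} else {
        /* endpoint already being torn down: skip the work */
}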
index a31c18c..dbdbc4f 100644 (file)
@@ -493,7 +493,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
-       if (conn)
+       if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                rxrpc_disconnect_call(call);
        if (call->security)
                call->security->free_call_crypto(call);
@@ -569,6 +569,7 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
        struct rxrpc_net *rxnet = call->rxnet;
 
+       rxrpc_put_connection(call->conn);
        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
        kfree(call->rxtx_annotations);
@@ -590,7 +591,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 
        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
-       ASSERTCMP(call->conn, ==, NULL);
 
        rxrpc_cleanup_ring(call);
        rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
index 376370c..ea7d4c2 100644 (file)
@@ -785,6 +785,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
        u32 cid;
 
        spin_lock(&conn->channel_lock);
+       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
 
        cid = call->cid;
        if (cid) {
@@ -792,7 +793,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
                chan = &conn->channels[channel];
        }
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
-       call->conn = NULL;
 
        /* Calls that have never actually been assigned a channel can simply be
         * discarded.  If the conn didn't get used either, it will follow
@@ -908,7 +908,6 @@ out:
        spin_unlock(&rxnet->client_conn_cache_lock);
 out_2:
        spin_unlock(&conn->channel_lock);
-       rxrpc_put_connection(conn);
        _leave("");
        return;
 
index 808a472..06fcff2 100644 (file)
@@ -438,16 +438,12 @@ again:
 /*
  * connection-level event processor
  */
-void rxrpc_process_connection(struct work_struct *work)
+static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 {
-       struct rxrpc_connection *conn =
-               container_of(work, struct rxrpc_connection, processor);
        struct sk_buff *skb;
        u32 abort_code = RX_PROTOCOL_ERROR;
        int ret;
 
-       rxrpc_see_connection(conn);
-
        if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
                rxrpc_secure_connection(conn);
 
@@ -475,18 +471,32 @@ void rxrpc_process_connection(struct work_struct *work)
                }
        }
 
-out:
-       rxrpc_put_connection(conn);
-       _leave("");
        return;
 
 requeue_and_leave:
        skb_queue_head(&conn->rx_queue, skb);
-       goto out;
+       return;
 
 protocol_error:
        if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
                goto requeue_and_leave;
        rxrpc_free_skb(skb, rxrpc_skb_freed);
-       goto out;
+       return;
+}
+
+void rxrpc_process_connection(struct work_struct *work)
+{
+       struct rxrpc_connection *conn =
+               container_of(work, struct rxrpc_connection, processor);
+
+       rxrpc_see_connection(conn);
+
+       if (__rxrpc_use_local(conn->params.local)) {
+               rxrpc_do_process_connection(conn);
+               rxrpc_unuse_local(conn->params.local);
+       }
+
+       rxrpc_put_connection(conn);
+       _leave("");
+       return;
 }
index 38d718e..c0b3154 100644 (file)
@@ -171,6 +171,8 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
 
        _enter("%d,%x", conn->debug_id, call->cid);
 
+       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+
        if (rcu_access_pointer(chan->call) == call) {
                /* Save the result of the call so that we can repeat it if necessary
                 * through the channel, whilst disposing of the actual call record.
@@ -223,9 +225,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
        __rxrpc_disconnect_call(conn, call);
        spin_unlock(&conn->channel_lock);
 
-       call->conn = NULL;
        conn->idle_timestamp = jiffies;
-       rxrpc_put_connection(conn);
 }
 
 /*
index 96d54e5..ef10fbf 100644 (file)
@@ -599,10 +599,8 @@ ack:
                                  false, true,
                                  rxrpc_propose_ack_input_data);
 
-       if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
-               trace_rxrpc_notify_socket(call->debug_id, serial);
-               rxrpc_notify_socket(call);
-       }
+       trace_rxrpc_notify_socket(call->debug_id, serial);
+       rxrpc_notify_socket(call);
 
 unlock:
        spin_unlock(&call->input_lock);
index 3658726..a6c1349 100644 (file)
@@ -364,11 +364,14 @@ void rxrpc_queue_local(struct rxrpc_local *local)
 void rxrpc_put_local(struct rxrpc_local *local)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id;
        int n;
 
        if (local) {
+               debug_id = local->debug_id;
+
                n = atomic_dec_return(&local->usage);
-               trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
+               trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
 
                if (n == 0)
                        call_rcu(&local->rcu, rxrpc_local_rcu);
@@ -380,14 +383,11 @@ void rxrpc_put_local(struct rxrpc_local *local)
  */
 struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
 {
-       unsigned int au;
-
        local = rxrpc_get_local_maybe(local);
        if (!local)
                return NULL;
 
-       au = atomic_fetch_add_unless(&local->active_users, 1, 0);
-       if (au == 0) {
+       if (!__rxrpc_use_local(local)) {
                rxrpc_put_local(local);
                return NULL;
        }
@@ -401,14 +401,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
  */
 void rxrpc_unuse_local(struct rxrpc_local *local)
 {
-       unsigned int au;
-
        if (local) {
-               au = atomic_dec_return(&local->active_users);
-               if (au == 0)
+               if (__rxrpc_unuse_local(local)) {
+                       rxrpc_get_local(local);
                        rxrpc_queue_local(local);
-               else
-                       rxrpc_put_local(local);
+               }
        }
 }
 
@@ -465,7 +462,7 @@ static void rxrpc_local_processor(struct work_struct *work)
 
        do {
                again = false;
-               if (atomic_read(&local->active_users) == 0) {
+               if (!__rxrpc_use_local(local)) {
                        rxrpc_local_destroyer(local);
                        break;
                }
@@ -479,6 +476,8 @@ static void rxrpc_local_processor(struct work_struct *work)
                        rxrpc_process_local_events(local);
                        again = true;
                }
+
+               __rxrpc_unuse_local(local);
        } while (again);
 
        rxrpc_put_local(local);
index 935bb60..bad3d24 100644 (file)
@@ -129,7 +129,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                          rxrpc_serial_t *_serial)
 {
-       struct rxrpc_connection *conn = NULL;
+       struct rxrpc_connection *conn;
        struct rxrpc_ack_buffer *pkt;
        struct msghdr msg;
        struct kvec iov[2];
@@ -139,18 +139,14 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        int ret;
        u8 reason;
 
-       spin_lock_bh(&call->lock);
-       if (call->conn)
-               conn = rxrpc_get_connection_maybe(call->conn);
-       spin_unlock_bh(&call->lock);
-       if (!conn)
+       if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                return -ECONNRESET;
 
        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
-       if (!pkt) {
-               rxrpc_put_connection(conn);
+       if (!pkt)
                return -ENOMEM;
-       }
+
+       conn = call->conn;
 
        msg.msg_name    = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
@@ -244,7 +240,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        }
 
 out:
-       rxrpc_put_connection(conn);
        kfree(pkt);
        return ret;
 }
@@ -254,7 +249,7 @@ out:
  */
 int rxrpc_send_abort_packet(struct rxrpc_call *call)
 {
-       struct rxrpc_connection *conn = NULL;
+       struct rxrpc_connection *conn;
        struct rxrpc_abort_buffer pkt;
        struct msghdr msg;
        struct kvec iov[1];
@@ -271,13 +266,11 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
            test_bit(RXRPC_CALL_TX_LAST, &call->flags))
                return 0;
 
-       spin_lock_bh(&call->lock);
-       if (call->conn)
-               conn = rxrpc_get_connection_maybe(call->conn);
-       spin_unlock_bh(&call->lock);
-       if (!conn)
+       if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                return -ECONNRESET;
 
+       conn = call->conn;
+
        msg.msg_name    = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
        msg.msg_control = NULL;
@@ -312,8 +305,6 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
                trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
                                      rxrpc_tx_point_call_abort);
        rxrpc_tx_backoff(call, ret);
-
-       rxrpc_put_connection(conn);
        return ret;
 }
 
index 48f67a9..923b263 100644 (file)
@@ -364,27 +364,31 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
                if (!rxrpc_get_peer_maybe(peer))
                        continue;
 
-               spin_unlock_bh(&rxnet->peer_hash_lock);
-
-               keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
-               slot = keepalive_at - base;
-               _debug("%02x peer %u t=%d {%pISp}",
-                      cursor, peer->debug_id, slot, &peer->srx.transport);
+               if (__rxrpc_use_local(peer->local)) {
+                       spin_unlock_bh(&rxnet->peer_hash_lock);
+
+                       keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+                       slot = keepalive_at - base;
+                       _debug("%02x peer %u t=%d {%pISp}",
+                              cursor, peer->debug_id, slot, &peer->srx.transport);
+
+                       if (keepalive_at <= base ||
+                           keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                               rxrpc_send_keepalive(peer);
+                               slot = RXRPC_KEEPALIVE_TIME;
+                       }
 
-               if (keepalive_at <= base ||
-                   keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
-                       rxrpc_send_keepalive(peer);
-                       slot = RXRPC_KEEPALIVE_TIME;
+                       /* A transmission to this peer occurred since last we
+                        * examined it so put it into the appropriate future
+                        * bucket.
+                        */
+                       slot += cursor;
+                       slot &= mask;
+                       spin_lock_bh(&rxnet->peer_hash_lock);
+                       list_add_tail(&peer->keepalive_link,
+                                     &rxnet->peer_keepalive[slot & mask]);
+                       rxrpc_unuse_local(peer->local);
                }
-
-               /* A transmission to this peer occurred since last we examined
-                * it so put it into the appropriate future bucket.
-                */
-               slot += cursor;
-               slot &= mask;
-               spin_lock_bh(&rxnet->peer_hash_lock);
-               list_add_tail(&peer->keepalive_link,
-                             &rxnet->peer_keepalive[slot & mask]);
                rxrpc_put_peer_locked(peer);
        }
 
index c226241..d36949d 100644 (file)
@@ -463,10 +463,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
 
 static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
        [TCA_RSVP_CLASSID]      = { .type = NLA_U32 },
-       [TCA_RSVP_DST]          = { .type = NLA_BINARY,
-                                   .len = RSVP_DST_LEN * sizeof(u32) },
-       [TCA_RSVP_SRC]          = { .type = NLA_BINARY,
-                                   .len = RSVP_DST_LEN * sizeof(u32) },
+       [TCA_RSVP_DST]          = { .len = RSVP_DST_LEN * sizeof(u32) },
+       [TCA_RSVP_SRC]          = { .len = RSVP_DST_LEN * sizeof(u32) },
        [TCA_RSVP_PINFO]        = { .len = sizeof(struct tc_rsvp_pinfo) },
 };
 
index 3d4a128..0323aee 100644 (file)
@@ -333,12 +333,31 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        cp->fall_through = p->fall_through;
        cp->tp = tp;
 
+       if (tb[TCA_TCINDEX_HASH])
+               cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+
+       if (tb[TCA_TCINDEX_MASK])
+               cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+
+       if (tb[TCA_TCINDEX_SHIFT])
+               cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+
+       if (!cp->hash) {
+               /* Hash not specified, use perfect hash if the upper limit
+                * of the hashing index is below the threshold.
+                */
+               if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+                       cp->hash = (cp->mask >> cp->shift) + 1;
+               else
+                       cp->hash = DEFAULT_HASH_SIZE;
+       }
+
        if (p->perfect) {
                int i;
 
                if (tcindex_alloc_perfect_hash(net, cp) < 0)
                        goto errout;
-               for (i = 0; i < cp->hash; i++)
+               for (i = 0; i < min(cp->hash, p->hash); i++)
                        cp->perfect[i].res = p->perfect[i].res;
                balloc = 1;
        }
@@ -350,15 +369,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        if (old_r)
                cr = r->res;
 
-       if (tb[TCA_TCINDEX_HASH])
-               cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
-
-       if (tb[TCA_TCINDEX_MASK])
-               cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
-
-       if (tb[TCA_TCINDEX_SHIFT])
-               cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
        err = -EBUSY;
 
        /* Hash already allocated, make sure that we still meet the
@@ -376,16 +386,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        if (tb[TCA_TCINDEX_FALL_THROUGH])
                cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
 
-       if (!cp->hash) {
-               /* Hash not specified, use perfect hash if the upper limit
-                * of the hashing index is below the threshold.
-                */
-               if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
-                       cp->hash = (cp->mask >> cp->shift) + 1;
-               else
-                       cp->hash = DEFAULT_HASH_SIZE;
-       }
-
        if (!cp->perfect && !cp->h)
                cp->alloc_hash = cp->hash;
 
index 3111817..7511a68 100644 (file)
@@ -1428,10 +1428,10 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
        return len;
 }
 
-static const struct file_operations use_gss_proxy_ops = {
-       .open = nonseekable_open,
-       .write = write_gssp,
-       .read = read_gssp,
+static const struct proc_ops use_gss_proxy_proc_ops = {
+       .proc_open      = nonseekable_open,
+       .proc_write     = write_gssp,
+       .proc_read      = read_gssp,
 };
 
 static int create_use_gss_proxy_proc_entry(struct net *net)
@@ -1442,7 +1442,7 @@ static int create_use_gss_proxy_proc_entry(struct net *net)
        sn->use_gss_proxy = -1;
        *p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
                              sn->proc_net_rpc,
-                             &use_gss_proxy_ops, net);
+                             &use_gss_proxy_proc_ops, net);
        if (!*p)
                return -ENOMEM;
        init_gssp_clnt(sn);
index d996bf8..375914c 100644 (file)
@@ -1571,15 +1571,14 @@ static int cache_release_procfs(struct inode *inode, struct file *filp)
        return cache_release(inode, filp, cd);
 }
 
-static const struct file_operations cache_file_operations_procfs = {
-       .owner          = THIS_MODULE,
-       .llseek         = no_llseek,
-       .read           = cache_read_procfs,
-       .write          = cache_write_procfs,
-       .poll           = cache_poll_procfs,
-       .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
-       .open           = cache_open_procfs,
-       .release        = cache_release_procfs,
+static const struct proc_ops cache_channel_proc_ops = {
+       .proc_lseek     = no_llseek,
+       .proc_read      = cache_read_procfs,
+       .proc_write     = cache_write_procfs,
+       .proc_poll      = cache_poll_procfs,
+       .proc_ioctl     = cache_ioctl_procfs, /* for FIONREAD */
+       .proc_open      = cache_open_procfs,
+       .proc_release   = cache_release_procfs,
 };
 
 static int content_open_procfs(struct inode *inode, struct file *filp)
@@ -1596,11 +1595,11 @@ static int content_release_procfs(struct inode *inode, struct file *filp)
        return content_release(inode, filp, cd);
 }
 
-static const struct file_operations content_file_operations_procfs = {
-       .open           = content_open_procfs,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = content_release_procfs,
+static const struct proc_ops content_proc_ops = {
+       .proc_open      = content_open_procfs,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = content_release_procfs,
 };
 
 static int open_flush_procfs(struct inode *inode, struct file *filp)
@@ -1634,12 +1633,12 @@ static ssize_t write_flush_procfs(struct file *filp,
        return write_flush(filp, buf, count, ppos, cd);
 }
 
-static const struct file_operations cache_flush_operations_procfs = {
-       .open           = open_flush_procfs,
-       .read           = read_flush_procfs,
-       .write          = write_flush_procfs,
-       .release        = release_flush_procfs,
-       .llseek         = no_llseek,
+static const struct proc_ops cache_flush_proc_ops = {
+       .proc_open      = open_flush_procfs,
+       .proc_read      = read_flush_procfs,
+       .proc_write     = write_flush_procfs,
+       .proc_release   = release_flush_procfs,
+       .proc_lseek     = no_llseek,
 };
 
 static void remove_cache_proc_entries(struct cache_detail *cd)
@@ -1662,19 +1661,19 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
                goto out_nomem;
 
        p = proc_create_data("flush", S_IFREG | 0600,
-                            cd->procfs, &cache_flush_operations_procfs, cd);
+                            cd->procfs, &cache_flush_proc_ops, cd);
        if (p == NULL)
                goto out_nomem;
 
        if (cd->cache_request || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
-                                    &cache_file_operations_procfs, cd);
+                                    &cache_channel_proc_ops, cd);
                if (p == NULL)
                        goto out_nomem;
        }
        if (cd->cache_show) {
                p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
-                                    &content_file_operations_procfs, cd);
+                                    &content_proc_ops, cd);
                if (p == NULL)
                        goto out_nomem;
        }
index 7c74197..c964b48 100644 (file)
@@ -69,12 +69,11 @@ static int rpc_proc_open(struct inode *inode, struct file *file)
        return single_open(file, rpc_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations rpc_proc_fops = {
-       .owner = THIS_MODULE,
-       .open = rpc_proc_open,
-       .read  = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
+static const struct proc_ops rpc_proc_ops = {
+       .proc_open      = rpc_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = single_release,
 };
 
 /*
@@ -281,19 +280,19 @@ EXPORT_SYMBOL_GPL(rpc_clnt_show_stats);
  */
 static inline struct proc_dir_entry *
 do_register(struct net *net, const char *name, void *data,
-           const struct file_operations *fops)
+           const struct proc_ops *proc_ops)
 {
        struct sunrpc_net *sn;
 
        dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
        sn = net_generic(net, sunrpc_net_id);
-       return proc_create_data(name, 0, sn->proc_net_rpc, fops, data);
+       return proc_create_data(name, 0, sn->proc_net_rpc, proc_ops, data);
 }
 
 struct proc_dir_entry *
 rpc_proc_register(struct net *net, struct rpc_stat *statp)
 {
-       return do_register(net, statp->program->name, statp, &rpc_proc_fops);
+       return do_register(net, statp->program->name, statp, &rpc_proc_ops);
 }
 EXPORT_SYMBOL_GPL(rpc_proc_register);
 
@@ -308,9 +307,9 @@ rpc_proc_unregister(struct net *net, const char *name)
 EXPORT_SYMBOL_GPL(rpc_proc_unregister);
 
 struct proc_dir_entry *
-svc_proc_register(struct net *net, struct svc_stat *statp, const struct file_operations *fops)
+svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
 {
-       return do_register(net, statp->program->pg_name, statp, fops);
+       return do_register(net, statp->program->pg_name, statp, proc_ops);
 }
 EXPORT_SYMBOL_GPL(svc_proc_register);
 
index 297d1eb..dbda08e 100644 (file)
@@ -3189,7 +3189,7 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
                                            flags | XFRM_LOOKUP_QUEUE |
                                            XFRM_LOOKUP_KEEP_DST_REF);
 
-       if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
+       if (PTR_ERR(dst) == -EREMOTE)
                return make_blackhole(net, dst_orig->ops->family, dst_orig);
 
        if (IS_ERR(dst))
index 9ca3e44..c406f03 100644 (file)
@@ -142,11 +142,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
        return ret ? ret : copied;
 }
 
-static const struct file_operations fifo_fops = {
-       .owner          = THIS_MODULE,
-       .read           = fifo_read,
-       .write          = fifo_write,
-       .llseek         = noop_llseek,
+static const struct proc_ops fifo_proc_ops = {
+       .proc_read      = fifo_read,
+       .proc_write     = fifo_write,
+       .proc_lseek     = noop_llseek,
 };
 
 static int __init example_init(void)
@@ -169,7 +168,7 @@ static int __init example_init(void)
                return -EIO;
        }
 
-       if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
+       if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
 #ifdef DYNAMIC
                kfifo_free(&test);
 #endif
index 6cdeb72..78977fc 100644 (file)
@@ -135,11 +135,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
        return ret ? ret : copied;
 }
 
-static const struct file_operations fifo_fops = {
-       .owner          = THIS_MODULE,
-       .read           = fifo_read,
-       .write          = fifo_write,
-       .llseek         = noop_llseek,
+static const struct proc_ops fifo_proc_ops = {
+       .proc_read      = fifo_read,
+       .proc_write     = fifo_write,
+       .proc_lseek     = noop_llseek,
 };
 
 static int __init example_init(void)
@@ -160,7 +159,7 @@ static int __init example_init(void)
                return -EIO;
        }
 
-       if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
+       if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
 #ifdef DYNAMIC
                kfifo_free(&test);
 #endif
index 79ae8bb..c507998 100644 (file)
@@ -149,11 +149,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
        return ret ? ret : copied;
 }
 
-static const struct file_operations fifo_fops = {
-       .owner          = THIS_MODULE,
-       .read           = fifo_read,
-       .write          = fifo_write,
-       .llseek         = noop_llseek,
+static const struct proc_ops fifo_proc_ops = {
+       .proc_read      = fifo_read,
+       .proc_write     = fifo_write,
+       .proc_lseek     = noop_llseek,
 };
 
 static int __init example_init(void)
@@ -176,7 +175,7 @@ static int __init example_init(void)
                return -EIO;
        }
 
-       if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
+       if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
 #ifdef DYNAMIC
                kfifo_free(&test);
 #endif
index 9330d42..3357bf4 100644 (file)
@@ -90,8 +90,6 @@ position p;
  kfree@p(x)
 |
  kzfree@p(x)
-|
- __krealloc@p(x, ...)
 |
  krealloc@p(x, ...)
 |
@@ -116,8 +114,6 @@ position p != safe.p;
 |
 * kzfree@p(x)
 |
-* __krealloc@p(x, ...)
-|
 * krealloc@p(x, ...)
 |
 * free_pages@p(x, ...)
index ecea41c..8bc7b04 100644 (file)
@@ -2831,42 +2831,39 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
                                int addrlen)
 {
        int rc = 0;
-#if IS_ENABLED(CONFIG_IPV6)
-       struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
-#endif
-#ifdef SMACK_IPV6_SECMARK_LABELING
-       struct smack_known *rsp;
-       struct socket_smack *ssp;
-#endif
 
        if (sock->sk == NULL)
                return 0;
-
+       if (sock->sk->sk_family != PF_INET &&
+           (!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6))
+               return 0;
+       if (addrlen < offsetofend(struct sockaddr, sa_family))
+               return 0;
+       if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
+               struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
 #ifdef SMACK_IPV6_SECMARK_LABELING
-       ssp = sock->sk->sk_security;
+               struct smack_known *rsp;
 #endif
 
-       switch (sock->sk->sk_family) {
-       case PF_INET:
-               if (addrlen < sizeof(struct sockaddr_in) ||
-                   sap->sa_family != AF_INET)
-                       return -EINVAL;
-               rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
-               break;
-       case PF_INET6:
-               if (addrlen < SIN6_LEN_RFC2133 || sap->sa_family != AF_INET6)
-                       return -EINVAL;
+               if (addrlen < SIN6_LEN_RFC2133)
+                       return 0;
 #ifdef SMACK_IPV6_SECMARK_LABELING
                rsp = smack_ipv6host_label(sip);
-               if (rsp != NULL)
+               if (rsp != NULL) {
+                       struct socket_smack *ssp = sock->sk->sk_security;
+
                        rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
-                                               SMK_CONNECTING);
+                                           SMK_CONNECTING);
+               }
 #endif
 #ifdef SMACK_IPV6_PORT_LABELING
                rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
 #endif
-               break;
+               return rc;
        }
+       if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
+               return 0;
+       rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
        return rc;
 }
 
index 6801d81..ca87ae4 100644 (file)
@@ -282,17 +282,16 @@ static int snd_info_entry_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations snd_info_entry_operations =
-{
-       .owner =                THIS_MODULE,
-       .llseek =               snd_info_entry_llseek,
-       .read =                 snd_info_entry_read,
-       .write =                snd_info_entry_write,
-       .poll =                 snd_info_entry_poll,
-       .unlocked_ioctl =       snd_info_entry_ioctl,
-       .mmap =                 snd_info_entry_mmap,
-       .open =                 snd_info_entry_open,
-       .release =              snd_info_entry_release,
+static const struct proc_ops snd_info_entry_operations =
+{
+       .proc_lseek     = snd_info_entry_llseek,
+       .proc_read      = snd_info_entry_read,
+       .proc_write     = snd_info_entry_write,
+       .proc_poll      = snd_info_entry_poll,
+       .proc_ioctl     = snd_info_entry_ioctl,
+       .proc_mmap      = snd_info_entry_mmap,
+       .proc_open      = snd_info_entry_open,
+       .proc_release   = snd_info_entry_release,
 };
 
 /*
@@ -421,14 +420,13 @@ static int snd_info_text_entry_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations snd_info_text_entry_ops =
+static const struct proc_ops snd_info_text_entry_ops =
 {
-       .owner =                THIS_MODULE,
-       .open =                 snd_info_text_entry_open,
-       .release =              snd_info_text_entry_release,
-       .write =                snd_info_text_entry_write,
-       .llseek =               seq_lseek,
-       .read =                 seq_read,
+       .proc_open      = snd_info_text_entry_open,
+       .proc_release   = snd_info_text_entry_release,
+       .proc_write     = snd_info_text_entry_write,
+       .proc_lseek     = seq_lseek,
+       .proc_read      = seq_read,
 };
 
 static struct snd_info_entry *create_subdir(struct module *mod,
@@ -810,7 +808,7 @@ static int __snd_info_register(struct snd_info_entry *entry)
                        return -ENOMEM;
                }
        } else {
-               const struct file_operations *ops;
+               const struct proc_ops *ops;
                if (entry->content == SNDRV_INFO_CONTENT_DATA)
                        ops = &snd_info_entry_operations;
                else
index 967c689..590a46a 100644 (file)
@@ -156,7 +156,7 @@ static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
 #endif /* CONFIG_X86_X32 */
 
 struct compat_snd_pcm_status64 {
-       s32 state;
+       snd_pcm_state_t state;
        u8 rsvd[4]; /* alignment */
        s64 trigger_tstamp_sec;
        s64 trigger_tstamp_nsec;
@@ -168,7 +168,7 @@ struct compat_snd_pcm_status64 {
        u32 avail;
        u32 avail_max;
        u32 overrange;
-       s32 suspended_state;
+       snd_pcm_state_t suspended_state;
        u32 audio_tstamp_data;
        s64 audio_tstamp_sec;
        s64 audio_tstamp_nsec;
@@ -376,13 +376,13 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
 #ifdef CONFIG_X86_X32
 /* X32 ABI has 64bit timespec and 64bit alignment */
 struct snd_pcm_mmap_status_x32 {
-       s32 state;
+       snd_pcm_state_t state;
        s32 pad1;
        u32 hw_ptr;
        u32 pad2; /* alignment */
        s64 tstamp_sec;
        s64 tstamp_nsec;
-       s32 suspended_state;
+       snd_pcm_state_t suspended_state;
        s32 pad3;
        s64 audio_tstamp_sec;
        s64 audio_tstamp_nsec;
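
Background for the type changes above and in the pcm_native.c hunks below: snd_pcm_state_t is a sparse-checked __bitwise integer (declared in include/uapi/sound/asound.h), so moving these fields and callbacks off plain int lets static analysis catch accidental mixing of PCM states with ordinary integers. The idiom, roughly:

typedef int __bitwise snd_pcm_state_t;                          /* distinct type under sparse */
#define SNDRV_PCM_STATE_OPEN    ((__force snd_pcm_state_t) 0)   /* values need __force casts  */

/* Plain "int state" arguments now trigger sparse warnings; intentional
 * escapes, such as ACTION_ARG_IGNORE in the pcm_native.c hunk below,
 * must spell out the __force cast. */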
index bb23f50..336406b 100644 (file)
@@ -551,7 +551,8 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
        return usecs;
 }
 
-static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_set_state(struct snd_pcm_substream *substream,
+                             snd_pcm_state_t state)
 {
        snd_pcm_stream_lock_irq(substream);
        if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
@@ -786,10 +787,22 @@ end:
        return err;
 }
 
+static int do_hw_free(struct snd_pcm_substream *substream)
+{
+       int result = 0;
+
+       snd_pcm_sync_stop(substream);
+       if (substream->ops->hw_free)
+               result = substream->ops->hw_free(substream);
+       if (substream->managed_buffer_alloc)
+               snd_pcm_lib_free_pages(substream);
+       return result;
+}
+
 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime;
-       int result = 0;
+       int result;
 
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
@@ -806,11 +819,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
        snd_pcm_stream_unlock_irq(substream);
        if (atomic_read(&substream->mmap_count))
                return -EBADFD;
-       snd_pcm_sync_stop(substream);
-       if (substream->ops->hw_free)
-               result = substream->ops->hw_free(substream);
-       if (substream->managed_buffer_alloc)
-               snd_pcm_lib_free_pages(substream);
+       result = do_hw_free(substream);
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
        pm_qos_remove_request(&substream->latency_pm_qos_req);
        return result;
@@ -1097,11 +1106,17 @@ static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
        runtime->trigger_master = NULL;
 }
 
+#define ACTION_ARG_IGNORE      (__force snd_pcm_state_t)0
+
 struct action_ops {
-       int (*pre_action)(struct snd_pcm_substream *substream, int state);
-       int (*do_action)(struct snd_pcm_substream *substream, int state);
-       void (*undo_action)(struct snd_pcm_substream *substream, int state);
-       void (*post_action)(struct snd_pcm_substream *substream, int state);
+       int (*pre_action)(struct snd_pcm_substream *substream,
+                         snd_pcm_state_t state);
+       int (*do_action)(struct snd_pcm_substream *substream,
+                        snd_pcm_state_t state);
+       void (*undo_action)(struct snd_pcm_substream *substream,
+                           snd_pcm_state_t state);
+       void (*post_action)(struct snd_pcm_substream *substream,
+                           snd_pcm_state_t state);
 };
 
 /*
@@ -1111,7 +1126,8 @@ struct action_ops {
  */
 static int snd_pcm_action_group(const struct action_ops *ops,
                                struct snd_pcm_substream *substream,
-                               int state, int do_lock)
+                               snd_pcm_state_t state,
+                               bool do_lock)
 {
        struct snd_pcm_substream *s = NULL;
        struct snd_pcm_substream *s1;
@@ -1168,7 +1184,7 @@ static int snd_pcm_action_group(const struct action_ops *ops,
  */
 static int snd_pcm_action_single(const struct action_ops *ops,
                                 struct snd_pcm_substream *substream,
-                                int state)
+                                snd_pcm_state_t state)
 {
        int res;
        
@@ -1249,14 +1265,14 @@ snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
  */
 static int snd_pcm_action(const struct action_ops *ops,
                          struct snd_pcm_substream *substream,
-                         int state)
+                         snd_pcm_state_t state)
 {
        struct snd_pcm_group *group;
        int res;
 
        group = snd_pcm_stream_group_ref(substream);
        if (group)
-               res = snd_pcm_action_group(ops, substream, state, 1);
+               res = snd_pcm_action_group(ops, substream, state, true);
        else
                res = snd_pcm_action_single(ops, substream, state);
        snd_pcm_group_unref(group, substream);
@@ -1268,7 +1284,7 @@ static int snd_pcm_action(const struct action_ops *ops,
  */
 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
                                   struct snd_pcm_substream *substream,
-                                  int state)
+                                  snd_pcm_state_t state)
 {
        int res;
 
@@ -1282,14 +1298,14 @@ static int snd_pcm_action_lock_irq(const struct action_ops *ops,
  */
 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
                                    struct snd_pcm_substream *substream,
-                                   int state)
+                                   snd_pcm_state_t state)
 {
        int res;
 
        /* Guarantee the group members won't change during non-atomic action */
        down_read(&snd_pcm_link_rwsem);
        if (snd_pcm_stream_linked(substream))
-               res = snd_pcm_action_group(ops, substream, state, 0);
+               res = snd_pcm_action_group(ops, substream, state, false);
        else
                res = snd_pcm_action_single(ops, substream, state);
        up_read(&snd_pcm_link_rwsem);
@@ -1299,7 +1315,8 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 /*
  * start callbacks
  */
-static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
+                            snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
@@ -1312,20 +1329,23 @@ static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
        return 0;
 }
 
-static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_start(struct snd_pcm_substream *substream,
+                           snd_pcm_state_t state)
 {
        if (substream->runtime->trigger_master != substream)
                return 0;
        return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
 }
 
-static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
+                              snd_pcm_state_t state)
 {
        if (substream->runtime->trigger_master == substream)
                substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
 }
 
-static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_start(struct snd_pcm_substream *substream,
+                              snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        snd_pcm_trigger_tstamp(substream);
@@ -1369,7 +1389,8 @@ static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
 /*
  * stop callbacks
  */
-static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
+                           snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
@@ -1378,7 +1399,8 @@ static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
        return 0;
 }
 
-static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
+                          snd_pcm_state_t state)
 {
        if (substream->runtime->trigger_master == substream &&
            snd_pcm_running(substream))
@@ -1386,7 +1408,8 @@ static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
        return 0; /* unconditionally stop all substreams */
 }
 
-static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
+                             snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (runtime->status->state != state) {
@@ -1457,14 +1480,17 @@ int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
 
 /*
- * pause callbacks
+ * pause callbacks: pass boolean (to start pause or resume) as state argument
  */
-static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
+#define pause_pushed(state)    (__force bool)(state)
+
+static int snd_pcm_pre_pause(struct snd_pcm_substream *substream,
+                            snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
                return -ENOSYS;
-       if (push) {
+       if (pause_pushed(state)) {
                if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
                        return -EBADFD;
        } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
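
Note: with every action callback now taking snd_pcm_state_t, callbacks whose real argument is not a PCM state (the pause push flag here, f_flags for prepare further down) route that value through the same slot with __force casts. A condensed view of the round trip, using only names introduced by these hunks; the real do_pause also updates hw_ptr and checks trigger_master.

    /* Caller wraps the boolean into the typed argument ... */
    static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
    {
            return snd_pcm_action(&snd_pcm_action_pause, substream,
                                  (__force snd_pcm_state_t)push);
    }

    /* ... and the callbacks unwrap it again. */
    #define pause_pushed(state)     (__force bool)(state)

    static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
                                snd_pcm_state_t state)
    {
            return substream->ops->trigger(substream,
                                           pause_pushed(state) ?
                                           SNDRV_PCM_TRIGGER_PAUSE_PUSH :
                                           SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
    }
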
@@ -1473,13 +1499,14 @@ static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
        return 0;
 }
 
-static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
+static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
+                           snd_pcm_state_t state)
 {
        if (substream->runtime->trigger_master != substream)
                return 0;
        /* some drivers might use hw_ptr to recover from the pause -
           update the hw_ptr now */
-       if (push)
+       if (pause_pushed(state))
                snd_pcm_update_hw_ptr(substream);
        /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
         * a delta between the current jiffies, this gives a large enough
@@ -1487,23 +1514,27 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
         */
        substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
        return substream->ops->trigger(substream,
-                                      push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
-                                             SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
+                                      pause_pushed(state) ?
+                                      SNDRV_PCM_TRIGGER_PAUSE_PUSH :
+                                      SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 }
 
-static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
+static void snd_pcm_undo_pause(struct snd_pcm_substream *substream,
+                              snd_pcm_state_t state)
 {
        if (substream->runtime->trigger_master == substream)
                substream->ops->trigger(substream,
-                                       push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
+                                       pause_pushed(state) ?
+                                       SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
                                        SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 }
 
-static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
+static void snd_pcm_post_pause(struct snd_pcm_substream *substream,
+                              snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        snd_pcm_trigger_tstamp(substream);
-       if (push) {
+       if (pause_pushed(state)) {
                runtime->status->state = SNDRV_PCM_STATE_PAUSED;
                snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
                wake_up(&runtime->sleep);
@@ -1524,15 +1555,24 @@ static const struct action_ops snd_pcm_action_pause = {
 /*
  * Push/release the pause for all linked streams.
  */
-static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
+static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
+{
+       return snd_pcm_action(&snd_pcm_action_pause, substream,
+                             (__force snd_pcm_state_t)push);
+}
+
+static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream,
+                                 bool push)
 {
-       return snd_pcm_action(&snd_pcm_action_pause, substream, push);
+       return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream,
+                                      (__force snd_pcm_state_t)push);
 }
 
 #ifdef CONFIG_PM
-/* suspend */
+/* suspend callback: state argument ignored */
 
-static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream,
+                              snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        switch (runtime->status->state) {
@@ -1548,7 +1588,8 @@ static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
        return 0;
 }
 
-static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
+                             snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (runtime->trigger_master != substream)
@@ -1559,7 +1600,8 @@ static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
        return 0; /* suspend unconditionally */
 }
 
-static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_suspend(struct snd_pcm_substream *substream,
+                                snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        snd_pcm_trigger_tstamp(substream);
@@ -1590,7 +1632,8 @@ static int snd_pcm_suspend(struct snd_pcm_substream *substream)
        unsigned long flags;
 
        snd_pcm_stream_lock_irqsave(substream, flags);
-       err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
+       err = snd_pcm_action(&snd_pcm_action_suspend, substream,
+                            ACTION_ARG_IGNORE);
        snd_pcm_stream_unlock_irqrestore(substream, flags);
        return err;
 }
@@ -1634,9 +1677,10 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
 }
 EXPORT_SYMBOL(snd_pcm_suspend_all);
 
-/* resume */
+/* resume callbacks: state argument ignored */
 
-static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
+                             snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
@@ -1645,7 +1689,8 @@ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
        return 0;
 }
 
-static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_resume(struct snd_pcm_substream *substream,
+                            snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (runtime->trigger_master != substream)
@@ -1658,14 +1703,16 @@ static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
        return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
 }
 
-static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_undo_resume(struct snd_pcm_substream *substream,
+                               snd_pcm_state_t state)
 {
        if (substream->runtime->trigger_master == substream &&
            snd_pcm_running(substream))
                substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
 }
 
-static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
+                               snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        snd_pcm_trigger_tstamp(substream);
@@ -1683,7 +1730,8 @@ static const struct action_ops snd_pcm_action_resume = {
 
 static int snd_pcm_resume(struct snd_pcm_substream *substream)
 {
-       return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
+       return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream,
+                                      ACTION_ARG_IGNORE);
 }
 
 #else
@@ -1724,7 +1772,9 @@ static int snd_pcm_xrun(struct snd_pcm_substream *substream)
 /*
  * reset ioctl
  */
-static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
+/* reset callbacks:  state argument ignored */
+static int snd_pcm_pre_reset(struct snd_pcm_substream *substream,
+                            snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        switch (runtime->status->state) {
@@ -1738,7 +1788,8 @@ static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
        }
 }
 
-static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
+                           snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
@@ -1752,7 +1803,8 @@ static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
        return 0;
 }
 
-static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
+                              snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        runtime->control->appl_ptr = runtime->status->hw_ptr;
@@ -1769,17 +1821,20 @@ static const struct action_ops snd_pcm_action_reset = {
 
 static int snd_pcm_reset(struct snd_pcm_substream *substream)
 {
-       return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
+       return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream,
+                                       ACTION_ARG_IGNORE);
 }
 
 /*
  * prepare ioctl
  */
-/* we use the second argument for updating f_flags */
+/* pass f_flags as state argument */
 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
-                              int f_flags)
+                              snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
+       int f_flags = (__force int)state;
+
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
            runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
                return -EBADFD;
@@ -1789,17 +1844,19 @@ static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
+                             snd_pcm_state_t state)
 {
        int err;
        snd_pcm_sync_stop(substream);
        err = substream->ops->prepare(substream);
        if (err < 0)
                return err;
-       return snd_pcm_do_reset(substream, 0);
+       return snd_pcm_do_reset(substream, state);
 }
 
-static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_prepare(struct snd_pcm_substream *substream,
+                                snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        runtime->control->appl_ptr = runtime->status->hw_ptr;
@@ -1832,7 +1889,7 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream,
        snd_pcm_stream_lock_irq(substream);
        switch (substream->runtime->status->state) {
        case SNDRV_PCM_STATE_PAUSED:
-               snd_pcm_pause(substream, 0);
+               snd_pcm_pause(substream, false);
                /* fallthru */
        case SNDRV_PCM_STATE_SUSPENDED:
                snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
@@ -1841,14 +1898,17 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream,
        snd_pcm_stream_unlock_irq(substream);
 
        return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
-                                       substream, f_flags);
+                                       substream,
+                                       (__force snd_pcm_state_t)f_flags);
 }
 
 /*
  * drain ioctl
  */
 
-static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
+/* drain init callbacks: state argument ignored */
+static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream,
+                                 snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        switch (runtime->status->state) {
@@ -1861,7 +1921,8 @@ static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state
        return 0;
 }
 
-static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream,
+                                snd_pcm_state_t state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -1887,7 +1948,9 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
        } else {
                /* stop running stream */
                if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
-                       int new_state = snd_pcm_capture_avail(runtime) > 0 ?
+                       snd_pcm_state_t new_state;
+
+                       new_state = snd_pcm_capture_avail(runtime) > 0 ?
                                SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
                        snd_pcm_do_stop(substream, new_state);
                        snd_pcm_post_stop(substream, new_state);
@@ -1903,7 +1966,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
        return 0;
 }
 
-static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream,
+                                   snd_pcm_state_t state)
 {
 }
 
@@ -1946,10 +2010,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
        snd_pcm_stream_lock_irq(substream);
        /* resume pause */
        if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
-               snd_pcm_pause(substream, 0);
+               snd_pcm_pause(substream, false);
 
        /* pre-start/stop - all running streams are changed to DRAINING state */
-       result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
+       result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
+                               ACTION_ARG_IGNORE);
        if (result < 0)
                goto unlock;
        /* in non-blocking, we don't wait in ioctl but let caller poll */
@@ -2050,7 +2115,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
        snd_pcm_stream_lock_irq(substream);
        /* resume pause */
        if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
-               snd_pcm_pause(substream, 0);
+               snd_pcm_pause(substream, false);
 
        snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
        /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
@@ -2529,9 +2594,7 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
 
        snd_pcm_drop(substream);
        if (substream->hw_opened) {
-               if (substream->ops->hw_free &&
-                   substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
-                       substream->ops->hw_free(substream);
+               do_hw_free(substream);
                substream->ops->close(substream);
                substream->hw_opened = 0;
        }
@@ -2894,12 +2957,12 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
 }
 
 struct snd_pcm_mmap_status32 {
-       s32 state;
+       snd_pcm_state_t state;
        s32 pad1;
        u32 hw_ptr;
        s32 tstamp_sec;
        s32 tstamp_nsec;
-       s32 suspended_state;
+       snd_pcm_state_t suspended_state;
        s32 audio_tstamp_sec;
        s32 audio_tstamp_nsec;
 } __attribute__((packed));
@@ -3177,9 +3240,7 @@ static int snd_pcm_common_ioctl(struct file *file,
        case SNDRV_PCM_IOCTL_DROP:
                return snd_pcm_drop(substream);
        case SNDRV_PCM_IOCTL_PAUSE:
-               return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
-                                              substream,
-                                              (int)(unsigned long)arg);
+               return snd_pcm_pause_lock_irq(substream, (unsigned long)arg);
        case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
        case SNDRV_PCM_IOCTL_READI_FRAMES:
                return snd_pcm_xferi_frames_ioctl(substream, arg);
index da0bd89..02ac3f4 100644 (file)
@@ -903,7 +903,7 @@ static void print_formats(struct snd_dummy *dummy,
 {
        int i;
 
-       for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+       for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
                if (dummy->pcm_hw.formats & (1ULL << i))
                        snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
        }
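
Note: the bound change above is an off-by-one fix: SNDRV_PCM_FORMAT_LAST names the last valid format value, not a count, so an exclusive `<` loop silently skips the final format. The corrected loop, with the intent spelled out:

    /* SNDRV_PCM_FORMAT_LAST is the highest valid format index, so the
     * iteration must be inclusive to report every supported format.
     */
    for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
            if (dummy->pcm_hw.formats & (1ULL << i))
                    snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
    }
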
index d01e691..a314b03 100644 (file)
@@ -631,20 +631,27 @@ void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
                nwait = 0;
                i = 0;
                list_for_each_entry(s, &bus->stream_list, list) {
-                       if (streams & (1 << i)) {
-                               if (start) {
-                                       /* check FIFO gets ready */
-                                       if (!(snd_hdac_stream_readb(s, SD_STS) &
-                                             SD_STS_FIFO_READY))
-                                               nwait++;
-                               } else {
-                                       /* check RUN bit is cleared */
-                                       if (snd_hdac_stream_readb(s, SD_CTL) &
-                                           SD_CTL_DMA_START)
-                                               nwait++;
+                       if (!(streams & (1 << i++)))
+                               continue;
+
+                       if (start) {
+                               /* check FIFO gets ready */
+                               if (!(snd_hdac_stream_readb(s, SD_STS) &
+                                     SD_STS_FIFO_READY))
+                                       nwait++;
+                       } else {
+                               /* check RUN bit is cleared */
+                               if (snd_hdac_stream_readb(s, SD_CTL) &
+                                   SD_CTL_DMA_START) {
+                                       nwait++;
+                                       /*
+                                        * Perform stream reset if DMA RUN
+                                        * bit not cleared within given timeout
+                                        */
+                                       if (timeout == 1)
+                                               snd_hdac_stream_reset(s);
                                }
                        }
-                       i++;
                }
                if (!nwait)
                        break;
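
Note: the restructured body above sits inside snd_hdac_stream_sync()'s countdown poll loop, which is not visible in the hunk; on the final iteration, instead of merely timing out, the stream is reset. Schematically, with an illustrative bound, delay and run_bits_cleared() predicate rather than the driver's real ones:

    /* Sketch: poll until the hardware condition clears, and use the last
     * iteration for a heavier recovery instead of silently giving up.
     */
    int timeout;

    for (timeout = 100; timeout; timeout--) {
            if (run_bits_cleared(azx_dev))          /* illustrative */
                    break;
            if (timeout == 1)
                    snd_hdac_stream_reset(azx_dev);
            udelay(10);
    }
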
index 6b7ff4a..4e76ed0 100644 (file)
@@ -463,7 +463,7 @@ static void snd_emu10k1_write_op(struct snd_emu10k1_fx8010_code *icode,
        u_int32_t *code;
        if (snd_BUG_ON(*ptr >= 512))
                return;
-       code = (u_int32_t __force *)icode->code + (*ptr) * 2;
+       code = icode->code + (*ptr) * 2;
        set_bit(*ptr, icode->code_valid);
        code[0] = ((x & 0x3ff) << 10) | (y & 0x3ff);
        code[1] = ((op & 0x0f) << 20) | ((r & 0x3ff) << 10) | (a & 0x3ff);
@@ -480,7 +480,7 @@ static void snd_emu10k1_audigy_write_op(struct snd_emu10k1_fx8010_code *icode,
        u_int32_t *code;
        if (snd_BUG_ON(*ptr >= 1024))
                return;
-       code = (u_int32_t __force *)icode->code + (*ptr) * 2;
+       code = icode->code + (*ptr) * 2;
        set_bit(*ptr, icode->code_valid);
        code[0] = ((x & 0x7ff) << 12) | (y & 0x7ff);
        code[1] = ((op & 0x0f) << 24) | ((r & 0x7ff) << 12) | (a & 0x7ff);
@@ -513,8 +513,8 @@ static int snd_emu10k1_gpr_poke(struct snd_emu10k1 *emu,
                if (!test_bit(gpr, icode->gpr_valid))
                        continue;
                if (in_kernel)
-                       val = *(__force u32 *)&icode->gpr_map[gpr];
-               else if (get_user(val, &icode->gpr_map[gpr]))
+                       val = icode->gpr_map[gpr];
+               else if (get_user(val, (__user u32 *)&icode->gpr_map[gpr]))
                        return -EFAULT;
                snd_emu10k1_ptr_write(emu, emu->gpr_base + gpr, 0, val);
        }
@@ -530,7 +530,7 @@ static int snd_emu10k1_gpr_peek(struct snd_emu10k1 *emu,
        for (gpr = 0; gpr < (emu->audigy ? 0x200 : 0x100); gpr++) {
                set_bit(gpr, icode->gpr_valid);
                val = snd_emu10k1_ptr_read(emu, emu->gpr_base + gpr, 0);
-               if (put_user(val, &icode->gpr_map[gpr]))
+               if (put_user(val, (__user u32 *)&icode->gpr_map[gpr]))
                        return -EFAULT;
        }
        return 0;
@@ -547,11 +547,11 @@ static int snd_emu10k1_tram_poke(struct snd_emu10k1 *emu,
                if (!test_bit(tram, icode->tram_valid))
                        continue;
                if (in_kernel) {
-                       val = *(__force u32 *)&icode->tram_data_map[tram];
-                       addr = *(__force u32 *)&icode->tram_addr_map[tram];
+                       val = icode->tram_data_map[tram];
+                       addr = icode->tram_addr_map[tram];
                } else {
-                       if (get_user(val, &icode->tram_data_map[tram]) ||
-                           get_user(addr, &icode->tram_addr_map[tram]))
+                       if (get_user(val, (__user __u32 *)&icode->tram_data_map[tram]) ||
+                           get_user(addr, (__user __u32 *)&icode->tram_addr_map[tram]))
                                return -EFAULT;
                }
                snd_emu10k1_ptr_write(emu, TANKMEMDATAREGBASE + tram, 0, val);
@@ -581,8 +581,8 @@ static int snd_emu10k1_tram_peek(struct snd_emu10k1 *emu,
                        addr = snd_emu10k1_ptr_read(emu, TANKMEMADDRREGBASE + tram, 0) >> 12;
                        addr |= snd_emu10k1_ptr_read(emu, A_TANKMEMCTLREGBASE + tram, 0) << 20;
                }
-               if (put_user(val, &icode->tram_data_map[tram]) ||
-                   put_user(addr, &icode->tram_addr_map[tram]))
+               if (put_user(val, (__user u32 *)&icode->tram_data_map[tram]) ||
+                   put_user(addr, (__user u32 *)&icode->tram_addr_map[tram]))
                        return -EFAULT;
        }
        return 0;
@@ -598,11 +598,11 @@ static int snd_emu10k1_code_poke(struct snd_emu10k1 *emu,
                if (!test_bit(pc / 2, icode->code_valid))
                        continue;
                if (in_kernel) {
-                       lo = *(__force u32 *)&icode->code[pc + 0];
-                       hi = *(__force u32 *)&icode->code[pc + 1];
+                       lo = icode->code[pc + 0];
+                       hi = icode->code[pc + 1];
                } else {
-                       if (get_user(lo, &icode->code[pc + 0]) ||
-                           get_user(hi, &icode->code[pc + 1]))
+                       if (get_user(lo, (__user u32 *)&icode->code[pc + 0]) ||
+                           get_user(hi, (__user u32 *)&icode->code[pc + 1]))
                                return -EFAULT;
                }
                snd_emu10k1_efx_write(emu, pc + 0, lo);
@@ -619,17 +619,21 @@ static int snd_emu10k1_code_peek(struct snd_emu10k1 *emu,
        memset(icode->code_valid, 0, sizeof(icode->code_valid));
        for (pc = 0; pc < (emu->audigy ? 2*1024 : 2*512); pc += 2) {
                set_bit(pc / 2, icode->code_valid);
-               if (put_user(snd_emu10k1_efx_read(emu, pc + 0), &icode->code[pc + 0]))
+               if (put_user(snd_emu10k1_efx_read(emu, pc + 0),
+                            (__user u32 *)&icode->code[pc + 0]))
                        return -EFAULT;
-               if (put_user(snd_emu10k1_efx_read(emu, pc + 1), &icode->code[pc + 1]))
+               if (put_user(snd_emu10k1_efx_read(emu, pc + 1),
+                            (__user u32 *)&icode->code[pc + 1]))
                        return -EFAULT;
        }
        return 0;
 }
 
 static struct snd_emu10k1_fx8010_ctl *
-snd_emu10k1_look_for_ctl(struct snd_emu10k1 *emu, struct emu10k1_ctl_elem_id *id)
+snd_emu10k1_look_for_ctl(struct snd_emu10k1 *emu,
+                        struct emu10k1_ctl_elem_id *_id)
 {
+       struct snd_ctl_elem_id *id = (struct snd_ctl_elem_id *)_id;
        struct snd_emu10k1_fx8010_ctl *ctl;
        struct snd_kcontrol *kcontrol;
 
@@ -672,41 +676,60 @@ static unsigned int *copy_tlv(const unsigned int __user *_tlv, bool in_kernel)
 }
 
 static int copy_gctl(struct snd_emu10k1 *emu,
-                    struct snd_emu10k1_fx8010_control_gpr *gctl,
-                    struct snd_emu10k1_fx8010_control_gpr __user *_gctl,
+                    struct snd_emu10k1_fx8010_control_gpr *dst,
+                    struct snd_emu10k1_fx8010_control_gpr *src,
                     int idx, bool in_kernel)
 {
-       struct snd_emu10k1_fx8010_control_old_gpr __user *octl;
+       struct snd_emu10k1_fx8010_control_gpr __user *_src;
+       struct snd_emu10k1_fx8010_control_old_gpr *octl;
+       struct snd_emu10k1_fx8010_control_old_gpr __user *_octl;
 
+       _src = (struct snd_emu10k1_fx8010_control_gpr __user *)src;
        if (emu->support_tlv) {
                if (in_kernel)
-                       memcpy(gctl, (__force void *)&_gctl[idx], sizeof(*gctl));
-               else if (copy_from_user(gctl, &_gctl[idx], sizeof(*gctl)))
+                       *dst = src[idx];
+               else if (copy_from_user(dst, &_src[idx], sizeof(*src)))
                        return -EFAULT;
                return 0;
        }
 
-       octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
+       octl = (struct snd_emu10k1_fx8010_control_old_gpr *)src;
+       _octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)octl;
        if (in_kernel)
-               memcpy(gctl, (__force void *)&octl[idx], sizeof(*octl));
-       else if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
+               memcpy(dst, &octl[idx], sizeof(*octl));
+       else if (copy_from_user(dst, &_octl[idx], sizeof(*octl)))
                return -EFAULT;
-       gctl->tlv = NULL;
+       dst->tlv = NULL;
        return 0;
 }
 
 static int copy_gctl_to_user(struct snd_emu10k1 *emu,
-                    struct snd_emu10k1_fx8010_control_gpr __user *_gctl,
-                    struct snd_emu10k1_fx8010_control_gpr *gctl,
+                    struct snd_emu10k1_fx8010_control_gpr *dst,
+                    struct snd_emu10k1_fx8010_control_gpr *src,
                     int idx)
 {
+       struct snd_emu10k1_fx8010_control_gpr __user *_dst;
        struct snd_emu10k1_fx8010_control_old_gpr __user *octl;
 
+       _dst = (struct snd_emu10k1_fx8010_control_gpr __user *)dst;
        if (emu->support_tlv)
-               return copy_to_user(&_gctl[idx], gctl, sizeof(*gctl));
+               return copy_to_user(&_dst[idx], src, sizeof(*src));
        
-       octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
-       return copy_to_user(&octl[idx], gctl, sizeof(*octl));
+       octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)dst;
+       return copy_to_user(&octl[idx], src, sizeof(*octl));
+}
+
+static int copy_ctl_elem_id(const struct emu10k1_ctl_elem_id *list, int i,
+                           struct emu10k1_ctl_elem_id *ret, bool in_kernel)
+{
+       struct emu10k1_ctl_elem_id __user *_id =
+               (struct emu10k1_ctl_elem_id __user *)&list[i];
+
+       if (in_kernel)
+               *ret = list[i];
+       else if (copy_from_user(ret, _id, sizeof(*ret)))
+               return -EFAULT;
+       return 0;
 }
 
 static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
@@ -714,17 +737,16 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
                                       bool in_kernel)
 {
        unsigned int i;
-       struct emu10k1_ctl_elem_id __user *_id;
        struct emu10k1_ctl_elem_id id;
        struct snd_emu10k1_fx8010_control_gpr *gctl;
+       struct snd_ctl_elem_id *gctl_id;
        int err;
        
-       _id = (__force struct emu10k1_ctl_elem_id __user *)icode->gpr_del_controls;
-       for (i = 0; i < icode->gpr_del_control_count; i++, _id++) {
-               if (in_kernel)
-                       id = *(__force struct emu10k1_ctl_elem_id *)_id;
-               else if (copy_from_user(&id, _id, sizeof(id)))
-                       return -EFAULT;
+       for (i = 0; i < icode->gpr_del_control_count; i++) {
+               err = copy_ctl_elem_id(icode->gpr_del_controls, i, &id,
+                                      in_kernel);
+               if (err < 0)
+                       return err;
                if (snd_emu10k1_look_for_ctl(emu, &id) == NULL)
                        return -ENOENT;
        }
@@ -740,16 +762,16 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
                }
                if (snd_emu10k1_look_for_ctl(emu, &gctl->id))
                        continue;
+               gctl_id = (struct snd_ctl_elem_id *)&gctl->id;
                down_read(&emu->card->controls_rwsem);
-               if (snd_ctl_find_id(emu->card,
-                                   (struct snd_ctl_elem_id *)&gctl->id)) {
+               if (snd_ctl_find_id(emu->card, gctl_id)) {
                        up_read(&emu->card->controls_rwsem);
                        err = -EEXIST;
                        goto __error;
                }
                up_read(&emu->card->controls_rwsem);
-               if (gctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
-                   gctl->id.iface != SNDRV_CTL_ELEM_IFACE_PCM) {
+               if (gctl_id->iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
+                   gctl_id->iface != SNDRV_CTL_ELEM_IFACE_PCM) {
                        err = -EINVAL;
                        goto __error;
                }
@@ -784,6 +806,7 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
 {
        unsigned int i, j;
        struct snd_emu10k1_fx8010_control_gpr *gctl;
+       struct snd_ctl_elem_id *gctl_id;
        struct snd_emu10k1_fx8010_ctl *ctl, *nctl;
        struct snd_kcontrol_new knew;
        struct snd_kcontrol *kctl;
@@ -804,24 +827,25 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
                        err = -EFAULT;
                        goto __error;
                }
-               if (gctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
-                   gctl->id.iface != SNDRV_CTL_ELEM_IFACE_PCM) {
+               gctl_id = (struct snd_ctl_elem_id *)&gctl->id;
+               if (gctl_id->iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
+                   gctl_id->iface != SNDRV_CTL_ELEM_IFACE_PCM) {
                        err = -EINVAL;
                        goto __error;
                }
-               if (! gctl->id.name[0]) {
+               if (!*gctl_id->name) {
                        err = -EINVAL;
                        goto __error;
                }
                ctl = snd_emu10k1_look_for_ctl(emu, &gctl->id);
                memset(&knew, 0, sizeof(knew));
-               knew.iface = gctl->id.iface;
-               knew.name = gctl->id.name;
-               knew.index = gctl->id.index;
-               knew.device = gctl->id.device;
-               knew.subdevice = gctl->id.subdevice;
+               knew.iface = gctl_id->iface;
+               knew.name = gctl_id->name;
+               knew.index = gctl_id->index;
+               knew.device = gctl_id->device;
+               knew.subdevice = gctl_id->subdevice;
                knew.info = snd_emu10k1_gpr_ctl_info;
-               knew.tlv.p = copy_tlv((__force const unsigned int __user *)gctl->tlv, in_kernel);
+               knew.tlv.p = copy_tlv((const unsigned int __user *)gctl->tlv, in_kernel);
                if (knew.tlv.p)
                        knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
                                SNDRV_CTL_ELEM_ACCESS_TLV_READ;
@@ -878,17 +902,15 @@ static int snd_emu10k1_del_controls(struct snd_emu10k1 *emu,
 {
        unsigned int i;
        struct emu10k1_ctl_elem_id id;
-       struct emu10k1_ctl_elem_id __user *_id;
        struct snd_emu10k1_fx8010_ctl *ctl;
        struct snd_card *card = emu->card;
+       int err;
        
-       _id = (__force struct emu10k1_ctl_elem_id __user *)icode->gpr_del_controls;
-
-       for (i = 0; i < icode->gpr_del_control_count; i++, _id++) {
-               if (in_kernel)
-                       id = *(__force struct emu10k1_ctl_elem_id *)_id;
-               else if (copy_from_user(&id, _id, sizeof(id)))
-                       return -EFAULT;
+       for (i = 0; i < icode->gpr_del_control_count; i++) {
+               err = copy_ctl_elem_id(icode->gpr_del_controls, i, &id,
+                                      in_kernel);
+               if (err < 0)
+                       return err;
                down_write(&card->controls_rwsem);
                ctl = snd_emu10k1_look_for_ctl(emu, &id);
                if (ctl)
@@ -917,7 +939,7 @@ static int snd_emu10k1_list_controls(struct snd_emu10k1 *emu,
                    i < icode->gpr_list_control_count) {
                        memset(gctl, 0, sizeof(*gctl));
                        id = &ctl->kcontrol->id;
-                       gctl->id.iface = id->iface;
+                       gctl->id.iface = (__force int)id->iface;
                        strlcpy(gctl->id.name, id->name, sizeof(gctl->id.name));
                        gctl->id.index = id->index;
                        gctl->id.device = id->device;
@@ -1095,7 +1117,7 @@ static void
 snd_emu10k1_init_mono_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
                              const char *name, int gpr, int defval)
 {
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, name);
        ctl->vcount = ctl->count = 1;
        ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1116,7 +1138,7 @@ static void
 snd_emu10k1_init_stereo_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
                                const char *name, int gpr, int defval)
 {
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, name);
        ctl->vcount = ctl->count = 2;
        ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1138,7 +1160,7 @@ static void
 snd_emu10k1_init_mono_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
                                    const char *name, int gpr, int defval)
 {
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, name);
        ctl->vcount = ctl->count = 1;
        ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1151,7 +1173,7 @@ static void
 snd_emu10k1_init_stereo_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
                                      const char *name, int gpr, int defval)
 {
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, name);
        ctl->vcount = ctl->count = 2;
        ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1204,8 +1226,8 @@ static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
        if (!icode)
                return err;
 
-       icode->gpr_map = (u_int32_t __user *) kcalloc(512 + 256 + 256 + 2 * 1024,
-                                                     sizeof(u_int32_t), GFP_KERNEL);
+       icode->gpr_map = kcalloc(512 + 256 + 256 + 2 * 1024,
+                                sizeof(u_int32_t), GFP_KERNEL);
        if (!icode->gpr_map)
                goto __err_gpr;
        controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
@@ -1213,7 +1235,7 @@ static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
        if (!controls)
                goto __err_ctrls;
 
-       gpr_map = (u32 __force *)icode->gpr_map;
+       gpr_map = icode->gpr_map;
 
        icode->tram_data_map = icode->gpr_map + 512;
        icode->tram_addr_map = icode->tram_data_map + 256;
@@ -1468,7 +1490,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
        
 
        ctl = &controls[nctl + 0];
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, "Tone Control - Bass");
        ctl->vcount = 2;
        ctl->count = 10;
@@ -1477,7 +1499,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
        ctl->value[0] = ctl->value[1] = 20;
        ctl->translation = EMU10K1_GPR_TRANSLATION_BASS;
        ctl = &controls[nctl + 1];
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, "Tone Control - Treble");
        ctl->vcount = 2;
        ctl->count = 10;
@@ -1758,7 +1780,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
                A_OP(icode, &ptr, 0x0f, 0xc0, 0xc0, 0xcf, 0xc0);
 
        icode->gpr_add_control_count = nctl;
-       icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls;
+       icode->gpr_add_controls = controls;
        emu->support_tlv = 1; /* support TLV */
        err = snd_emu10k1_icode_poke(emu, icode, true);
        emu->support_tlv = 0; /* clear again */
@@ -1766,7 +1788,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
 __err:
        kfree(controls);
 __err_ctrls:
-       kfree((void __force *)icode->gpr_map);
+       kfree(icode->gpr_map);
 __err_gpr:
        kfree(icode);
        return err;
@@ -1839,8 +1861,8 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
        if (!icode)
                return err;
 
-       icode->gpr_map = (u_int32_t __user *) kcalloc(256 + 160 + 160 + 2 * 512,
-                                                     sizeof(u_int32_t), GFP_KERNEL);
+       icode->gpr_map = kcalloc(256 + 160 + 160 + 2 * 512,
+                                sizeof(u_int32_t), GFP_KERNEL);
        if (!icode->gpr_map)
                goto __err_gpr;
 
@@ -1854,7 +1876,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
        if (!ipcm)
                goto __err_ipcm;
 
-       gpr_map = (u32 __force *)icode->gpr_map;
+       gpr_map = icode->gpr_map;
 
        icode->tram_data_map = icode->gpr_map + 256;
        icode->tram_addr_map = icode->tram_data_map + 160;
@@ -2188,7 +2210,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
        OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 5), GPR(playback + 5), C_00000000, C_00000000); /* LFE */
 
        ctl = &controls[i + 0];
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, "Tone Control - Bass");
        ctl->vcount = 2;
        ctl->count = 10;
@@ -2198,7 +2220,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
        ctl->tlv = snd_emu10k1_bass_treble_db_scale;
        ctl->translation = EMU10K1_GPR_TRANSLATION_BASS;
        ctl = &controls[i + 1];
-       ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+       ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
        strcpy(ctl->id.name, "Tone Control - Treble");
        ctl->vcount = 2;
        ctl->count = 10;
@@ -2384,7 +2406,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
        if ((err = snd_emu10k1_fx8010_tram_setup(emu, ipcm->buffer_size)) < 0)
                goto __err;
        icode->gpr_add_control_count = i;
-       icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls;
+       icode->gpr_add_controls = controls;
        emu->support_tlv = 1; /* support TLV */
        err = snd_emu10k1_icode_poke(emu, icode, true);
        emu->support_tlv = 0; /* clear again */
@@ -2395,7 +2417,7 @@ __err:
 __err_ipcm:
        kfree(controls);
 __err_ctrls:
-       kfree((void __force *)icode->gpr_map);
+       kfree(icode->gpr_map);
 __err_gpr:
        kfree(icode);
        return err;
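
Note: the recurring change in this file is to stop force-casting user pointers ad hoc and instead funnel each copy through a helper that takes an in_kernel flag, so the single __user cast lives in one place (see copy_ctl_elem_id() and copy_gctl() above). A stripped-down sketch of that shape; struct item and its field are hypothetical.

    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct item { u32 value; };         /* hypothetical payload */

    static int copy_item(const struct item *list, int i,
                         struct item *ret, bool in_kernel)
    {
            const struct item __user *_list =
                    (const struct item __user *)&list[i];

            if (in_kernel)
                    *ret = list[i];                 /* plain kernel memory */
            else if (copy_from_user(ret, _list, sizeof(*ret)))
                    return -EFAULT;                 /* checked user copy */
            return 0;
    }
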
index 9757667..2609e39 100644 (file)
@@ -1110,16 +1110,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
                if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
                        active = true;
 
-               /* clear rirb int */
                status = azx_readb(chip, RIRBSTS);
                if (status & RIRB_INT_MASK) {
+                       /*
+                        * Clearing the interrupt status here ensures that no
+                        * interrupt gets masked after the RIRB wp is read in
+                        * snd_hdac_bus_update_rirb. This avoids a possible
+                        * race condition where codec response in RIRB may
+                        * remain unserviced by IRQ, eventually falling back
+                        * to polling mode in azx_rirb_get_response.
+                        */
+                       azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
                        active = true;
                        if (status & RIRB_INT_RESPONSE) {
                                if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
                                        udelay(80);
                                snd_hdac_bus_update_rirb(bus);
                        }
-                       azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
                }
        } while (active && ++repeat < 10);
 
index b5e8d43..92a042e 100644 (file)
@@ -2451,6 +2451,8 @@ static const struct pci_device_id azx_ids[] = {
        /* Jasperlake */
        { PCI_DEVICE(0x8086, 0x38c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4dc8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Tigerlake */
        { PCI_DEVICE(0x8086, 0xa0c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 48bddc2..5119a9a 100644 (file)
@@ -1550,6 +1550,34 @@ static bool update_eld(struct hda_codec *codec,
        return eld_changed;
 }
 
+static struct snd_jack *pin_idx_to_pcm_jack(struct hda_codec *codec,
+                                           struct hdmi_spec_per_pin *per_pin)
+{
+       struct hdmi_spec *spec = codec->spec;
+       struct snd_jack *jack = NULL;
+       struct hda_jack_tbl *jack_tbl;
+
+       /* if !dyn_pcm_assign, get jack from hda_jack_tbl
+        * in !dyn_pcm_assign case, spec->pcm_rec[].jack is not
+        * NULL even after snd_hda_jack_tbl_clear() is called to
+        * free snd_jack. This may cause an invalid memory access
+        * when calling snd_jack_report()
+        */
+       if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign) {
+               jack = spec->pcm_rec[per_pin->pcm_idx].jack;
+       } else if (!spec->dyn_pcm_assign) {
+               /*
+                * jack tbl doesn't support DP MST
+                * DP MST will use dyn_pcm_assign,
+                * so DP MST will never come here
+                */
+               jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+                                                   per_pin->dev_id);
+               if (jack_tbl)
+                       jack = jack_tbl->jack;
+       }
+       return jack;
+}
 /* update ELD and jack state via HD-audio verbs */
 static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
                                         int repoll)
@@ -1571,6 +1599,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        int present;
        bool ret;
        bool do_repoll = false;
+       struct snd_jack *pcm_jack = NULL;
 
        present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
 
@@ -1598,10 +1627,19 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
                        do_repoll = true;
        }
 
-       if (do_repoll)
+       if (do_repoll) {
                schedule_delayed_work(&per_pin->work, msecs_to_jiffies(300));
-       else
+       } else {
+               /*
+                * pcm_idx >=0 before update_eld() means it is in monitor
+                * disconnected event. Jack must be fetched before
+                * update_eld().
+                */
+               pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
                update_eld(codec, per_pin, eld);
+               if (!pcm_jack)
+                       pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
+       }
 
        ret = !repoll || !eld->monitor_present || eld->eld_valid;
 
@@ -1610,38 +1648,32 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
                jack->block_report = !ret;
                jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
                        AC_PINSENSE_PRESENCE : 0;
-       }
-       mutex_unlock(&per_pin->lock);
-       return ret;
-}
 
-static struct snd_jack *pin_idx_to_jack(struct hda_codec *codec,
-                                struct hdmi_spec_per_pin *per_pin)
-{
-       struct hdmi_spec *spec = codec->spec;
-       struct snd_jack *jack = NULL;
-       struct hda_jack_tbl *jack_tbl;
+               if (spec->dyn_pcm_assign && pcm_jack && !do_repoll) {
+                       int state = 0;
+
+                       if (jack->pin_sense & AC_PINSENSE_PRESENCE)
+                               state = SND_JACK_AVOUT;
+                       snd_jack_report(pcm_jack, state);
+               }
 
-       /* if !dyn_pcm_assign, get jack from hda_jack_tbl
-        * in !dyn_pcm_assign case, spec->pcm_rec[].jack is not
-        * NULL even after snd_hda_jack_tbl_clear() is called to
-        * free snd_jack. This may cause access invalid memory
-        * when calling snd_jack_report
-        */
-       if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign)
-               jack = spec->pcm_rec[per_pin->pcm_idx].jack;
-       else if (!spec->dyn_pcm_assign) {
                /*
-                * jack tbl doesn't support DP MST
-                * DP MST will use dyn_pcm_assign,
-                * so DP MST will never come here
+                * The snd_hda_jack_pin_sense() call at the beginning of this
+                * function updates jack->pin_sense and clears jack->jack_dirty;
+                * therefore snd_hda_jack_report_sync() will not override
+                * jack->pin_sense.
+                *
+                * snd_hda_jack_report_sync() is superfluous for the
+                * dyn_pcm_assign case: the jack->pin_sense update was already
+                * performed, and hda_jack->jack is NULL for dyn_pcm_assign.
+                *
+                * Don't call snd_hda_jack_report_sync() for
+                * dyn_pcm_assign.
                 */
-               jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
-                                                   per_pin->dev_id);
-               if (jack_tbl)
-                       jack = jack_tbl->jack;
+               ret = ret && !spec->dyn_pcm_assign;
        }
-       return jack;
+       mutex_unlock(&per_pin->lock);
+       return ret;
 }
 
 /* update ELD and jack state via audio component */
@@ -1677,10 +1709,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
        /* pcm_idx >=0 before update_eld() means it is in monitor
         * disconnected event. Jack must be fetched before update_eld()
         */
-       jack = pin_idx_to_jack(codec, per_pin);
+       jack = pin_idx_to_pcm_jack(codec, per_pin);
        changed = update_eld(codec, per_pin, eld);
        if (jack == NULL)
-               jack = pin_idx_to_jack(codec, per_pin);
+               jack = pin_idx_to_pcm_jack(codec, per_pin);
        if (changed && jack)
                snd_jack_report(jack,
                                (eld->monitor_present && eld->eld_valid) ?
@@ -4256,6 +4288,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI",    patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",    patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index 3b38a13..4770fb3 100644 (file)
@@ -9111,6 +9111,7 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
                {0x14, 0x01014010},
                {0x17, 0x90170150},
+               {0x19, 0x02a11060},
                {0x1b, 0x01813030},
                {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
index c1e15ec..cc06f0a 100644 (file)
@@ -4802,7 +4802,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                break;
        }
        case SNDRV_HDSP_IOCTL_UPLOAD_FIRMWARE: {
-               struct hdsp_firmware __user *firmware;
+               struct hdsp_firmware firmware;
                u32 __user *firmware_data;
                int err;
 
@@ -4815,10 +4815,9 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
 
                dev_info(hdsp->card->dev,
                         "initializing firmware upload\n");
-               firmware = (struct hdsp_firmware __user *)argp;
-
-               if (get_user(firmware_data, (__force void __user **)&firmware->firmware_data))
+               if (copy_from_user(&firmware, argp, sizeof(firmware)))
                        return -EFAULT;
+               firmware_data = (u32 __user *)firmware.firmware_data;
 
                if (hdsp_check_for_iobox (hdsp))
                        return -EIO;
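
The hdsp hwdep hunk above replaces a get_user() on a pointer stored inside a user-space struct with a copy_from_user() of the whole struct, so the kernel copies the descriptor first and only then treats the embedded pointer as a user address. A minimal kernel-style sketch of that ioctl pattern, using hypothetical struct and field names (my_fw, data_ptr) rather than the driver's own:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/*
 * Sketch only: fetch a small descriptor struct from an ioctl argument,
 * then treat the pointer stored inside it as a user-space address.
 */
struct my_fw {
	void __user *data_ptr;		/* buffer supplied by user space */
};

static int fetch_fw_pointer(void __user *argp, u32 __user **firmware_data)
{
	struct my_fw fw;

	/* copy the whole struct; never dereference argp directly */
	if (copy_from_user(&fw, argp, sizeof(fw)))
		return -EFAULT;

	*firmware_data = (u32 __user *)fw.data_ptr;
	return 0;
}
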
index bf51cad..31cd400 100644 (file)
@@ -234,30 +234,32 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
                        switch (rtd->i2s_instance) {
                        case I2S_BT_INSTANCE:
                                reg_val = mmACP_BTTDM_ITER;
-                               ier_val = mmACP_BTTDM_IER;
                                break;
                        case I2S_SP_INSTANCE:
                        default:
                                reg_val = mmACP_I2STDM_ITER;
-                               ier_val = mmACP_I2STDM_IER;
                        }
 
                } else {
                        switch (rtd->i2s_instance) {
                        case I2S_BT_INSTANCE:
                                reg_val = mmACP_BTTDM_IRER;
-                               ier_val = mmACP_BTTDM_IER;
                                break;
                        case I2S_SP_INSTANCE:
                        default:
                                reg_val = mmACP_I2STDM_IRER;
-                               ier_val = mmACP_I2STDM_IER;
                        }
                }
                val = rv_readl(rtd->acp3x_base + reg_val);
                val = val & ~BIT(0);
                rv_writel(val, rtd->acp3x_base + reg_val);
-               rv_writel(0, rtd->acp3x_base + ier_val);
+
+               if (!(rv_readl(rtd->acp3x_base + mmACP_BTTDM_ITER) & BIT(0)) &&
+                    !(rv_readl(rtd->acp3x_base + mmACP_BTTDM_IRER) & BIT(0)))
+                       rv_writel(0, rtd->acp3x_base + mmACP_BTTDM_IER);
+               if (!(rv_readl(rtd->acp3x_base + mmACP_I2STDM_ITER) & BIT(0)) &&
+                    !(rv_readl(rtd->acp3x_base + mmACP_I2STDM_IRER) & BIT(0)))
+                       rv_writel(0, rtd->acp3x_base + mmACP_I2STDM_IER);
                ret = 0;
                break;
        default:
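
The acp3x trigger change above stops clearing the per-instance interrupt-enable register unconditionally; it is now cleared only once both the transmit (ITER) and receive (IRER) enable bits of that instance read back as idle, so stopping one direction no longer disables interrupts for the other. A hedged sketch of the same "disable a shared enable bit only when every user is idle" pattern, with made-up register offsets rather than the ACP 3.x register map:

#include <linux/io.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Example offsets only, not the real register map. */
#define REG_IRQ_ENABLE	0x00
#define REG_TX_ENABLE	0x08
#define REG_RX_ENABLE	0x10

/*
 * Sketch: clear a shared interrupt-enable register only when both the
 * TX and RX enable registers of the instance read back as disabled.
 */
static void maybe_disable_irq(void __iomem *base)
{
	u32 tx = readl(base + REG_TX_ENABLE);
	u32 rx = readl(base + REG_RX_ENABLE);

	if (!(tx & BIT(0)) && !(rx & BIT(0)))
		writel(0, base + REG_IRQ_ENABLE);
}
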
index 5c3ec3c..aecc3c0 100644 (file)
@@ -349,13 +349,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
        component = snd_soc_rtdcom_lookup(prtd, DRV_NAME);
        adata = dev_get_drvdata(component->dev);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               adata->play_stream = NULL;
-               adata->i2ssp_play_stream = NULL;
-       } else {
-               adata->capture_stream = NULL;
-               adata->i2ssp_capture_stream = NULL;
-       }
 
        /* Disable ACP irq, when the current stream is being closed and
         * another stream is also not active.
@@ -363,6 +356,13 @@ static int acp3x_dma_close(struct snd_soc_component *component,
        if (!adata->play_stream && !adata->capture_stream &&
                !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
                rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               adata->play_stream = NULL;
+               adata->i2ssp_play_stream = NULL;
+       } else {
+               adata->capture_stream = NULL;
+               adata->i2ssp_capture_stream = NULL;
+       }
        return 0;
 }
 
index c9eb683..7e90f5d 100644 (file)
@@ -214,7 +214,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_UDA134X
        select SND_SOC_UDA1380 if I2C
        select SND_SOC_WCD9335 if SLIMBUS
-       select SND_SOC_WCD934X if MFD_WCD934X
+       select SND_SOC_WCD934X if MFD_WCD934X && COMMON_CLK
        select SND_SOC_WL1273 if MFD_WL1273_CORE
        select SND_SOC_WM0010 if SPI_MASTER
        select SND_SOC_WM1250_EV1 if I2C
@@ -1334,6 +1334,7 @@ config SND_SOC_WCD9335
 
 config SND_SOC_WCD934X
        tristate "WCD9340/WCD9341 Codec"
+       depends on COMMON_CLK
        depends on MFD_WCD934X
        help
          The WCD9340/9341 is an audio codec IC Integrated in
index e8c5fda..979cfb1 100644 (file)
@@ -295,8 +295,7 @@ static int ak4104_spi_probe(struct spi_device *spi)
 
        reset_gpiod = devm_gpiod_get_optional(&spi->dev, "reset",
                                              GPIOD_OUT_HIGH);
-       if (IS_ERR(reset_gpiod) &&
-           PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
+       if (PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        /* read the 'reserved' register - according to the datasheet, it
index 793a14d..5f25b9f 100644 (file)
@@ -681,8 +681,7 @@ static int cs4270_i2c_probe(struct i2c_client *i2c_client,
 
        reset_gpiod = devm_gpiod_get_optional(&i2c_client->dev, "reset",
                                              GPIOD_OUT_HIGH);
-       if (IS_ERR(reset_gpiod) &&
-           PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
+       if (PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        cs4270->regmap = devm_regmap_init_i2c(i2c_client, &cs4270_regmap);
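
The ak4104 and cs4270 hunks (and the aic32x4 and sun4i-spdif ones later in this series) drop the redundant IS_ERR() test: devm_gpiod_get_optional() returns NULL when the GPIO is simply not described and an ERR_PTR() on failure, and PTR_ERR() of a NULL or valid descriptor will not be -EPROBE_DEFER in practice, so the single comparison is enough. A minimal probe-time sketch of the pattern, with placeholder names:

#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/errno.h>

/*
 * Sketch: defer probing until the provider of an optional "reset" GPIO
 * shows up; a NULL descriptor (GPIO not described at all) is fine.
 */
static int get_optional_reset(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (PTR_ERR(gpiod) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* provider not ready yet */
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);	/* any other error is fatal */

	*out = gpiod;			/* may be NULL: no reset line */
	return 0;
}
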
index 0313e11..5bc2c64 100644 (file)
@@ -52,7 +52,8 @@ static void max98090_shdn_restore_locked(struct max98090_priv *max98090)
 
 static void max98090_shdn_save(struct max98090_priv *max98090)
 {
-       mutex_lock(&max98090->component->card->dapm_mutex);
+       mutex_lock_nested(&max98090->component->card->dapm_mutex,
+                         SND_SOC_DAPM_CLASS_RUNTIME);
        max98090_shdn_save_locked(max98090);
 }
 
index 4a9c5b5..6d490e2 100644 (file)
@@ -389,7 +389,7 @@ static const char * const rt1015_boost_mode[] = {
        "Bypass", "Adaptive", "Fixed Adaptive"
 };
 
-static const SOC_ENUM_SINGLE_DECL(rt1015_boost_mode_enum, 0, 0,
+static SOC_ENUM_SINGLE_DECL(rt1015_boost_mode_enum, 0, 0,
        rt1015_boost_mode);
 
 static int rt1015_boost_mode_get(struct snd_kcontrol *kcontrol,
index 8a03dbf..d930f60 100644 (file)
@@ -673,7 +673,7 @@ static const struct sdw_device_id rt1308_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt1308_id);
 
-static int rt1308_dev_suspend(struct device *dev)
+static int __maybe_unused rt1308_dev_suspend(struct device *dev)
 {
        struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
 
@@ -687,7 +687,7 @@ static int rt1308_dev_suspend(struct device *dev)
 
 #define RT1308_PROBE_TIMEOUT 2000
 
-static int rt1308_dev_resume(struct device *dev)
+static int __maybe_unused rt1308_dev_resume(struct device *dev)
 {
        struct sdw_slave *slave = dev_to_sdw_dev(dev);
        struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
index a4b9542..d4e0f95 100644 (file)
@@ -486,7 +486,7 @@ static const struct sdw_device_id rt700_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt700_id);
 
-static int rt700_dev_suspend(struct device *dev)
+static int __maybe_unused rt700_dev_suspend(struct device *dev)
 {
        struct rt700_priv *rt700 = dev_get_drvdata(dev);
 
@@ -500,7 +500,7 @@ static int rt700_dev_suspend(struct device *dev)
 
 #define RT700_PROBE_TIMEOUT 2000
 
-static int rt700_dev_resume(struct device *dev)
+static int __maybe_unused rt700_dev_resume(struct device *dev)
 {
        struct sdw_slave *slave = dev_to_sdw_dev(dev);
        struct rt700_priv *rt700 = dev_get_drvdata(dev);
index 85e62e1..fc3a3fa 100644 (file)
@@ -487,7 +487,7 @@ static const struct sdw_device_id rt711_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt711_id);
 
-static int rt711_dev_suspend(struct device *dev)
+static int __maybe_unused rt711_dev_suspend(struct device *dev)
 {
        struct rt711_priv *rt711 = dev_get_drvdata(dev);
 
@@ -501,7 +501,7 @@ static int rt711_dev_suspend(struct device *dev)
 
 #define RT711_PROBE_TIMEOUT 2000
 
-static int rt711_dev_resume(struct device *dev)
+static int __maybe_unused rt711_dev_resume(struct device *dev)
 {
        struct sdw_slave *slave = dev_to_sdw_dev(dev);
        struct rt711_priv *rt711 = dev_get_drvdata(dev);
index 6d892c4..64ef56e 100644 (file)
@@ -549,7 +549,7 @@ static const struct sdw_device_id rt715_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt715_id);
 
-static int rt715_dev_suspend(struct device *dev)
+static int __maybe_unused rt715_dev_suspend(struct device *dev)
 {
        struct rt715_priv *rt715 = dev_get_drvdata(dev);
 
@@ -563,7 +563,7 @@ static int rt715_dev_suspend(struct device *dev)
 
 #define RT715_PROBE_TIMEOUT 2000
 
-static int rt715_dev_resume(struct device *dev)
+static int __maybe_unused rt715_dev_resume(struct device *dev)
 {
        struct sdw_slave *slave = dev_to_sdw_dev(dev);
        struct rt715_priv *rt715 = dev_get_drvdata(dev);
index b4e9a6c..d087f3b 100644 (file)
@@ -1098,11 +1098,9 @@ static int aic32x4_setup_regulators(struct device *dev,
                        return PTR_ERR(aic32x4->supply_av);
                }
        } else {
-               if (IS_ERR(aic32x4->supply_dv) &&
-                               PTR_ERR(aic32x4->supply_dv) == -EPROBE_DEFER)
+               if (PTR_ERR(aic32x4->supply_dv) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
-               if (IS_ERR(aic32x4->supply_av) &&
-                               PTR_ERR(aic32x4->supply_av) == -EPROBE_DEFER)
+               if (PTR_ERR(aic32x4->supply_av) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
        }
 
index 33b13f3..9177401 100644 (file)
@@ -617,12 +617,15 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
                snd_soc_dapm_add_routes(&card->dapm, broxton_map,
                                        ARRAY_SIZE(broxton_map));
 
-       pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
-                              head);
-       component = pcm->codec_dai->component;
+       if (list_empty(&ctx->hdmi_pcm_list))
+               return -EINVAL;
 
-       if (ctx->common_hdmi_codec_drv)
+       if (ctx->common_hdmi_codec_drv) {
+               pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+                                      head);
+               component = pcm->codec_dai->component;
                return hda_dsp_hdmi_build_controls(card, component);
+       }
 
        list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
                component = pcm->codec_dai->component;
@@ -643,9 +646,6 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
                i++;
        }
 
-       if (!component)
-               return -EINVAL;
-
        return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
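
This and the following machine-driver late_probe fixes all add a list_empty() check before list_first_entry(); on an empty hdmi_pcm_list the old code would have handed a bogus "first entry" pointer to the HDMI control setup. A minimal sketch of the guarded lookup, with a hypothetical item type:

#include <linux/list.h>
#include <linux/errno.h>

/*
 * Sketch: never call list_first_entry() on a possibly empty list.
 * struct item and its "node" member are illustrative only.
 */
struct item {
	struct list_head node;
	int value;
};

static int first_value(struct list_head *head)
{
	struct item *it;

	if (list_empty(head))
		return -EINVAL;		/* nothing was ever added */

	it = list_first_entry(head, struct item, node);
	return it->value;
}
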
 
index 067a97e..4b67f26 100644 (file)
@@ -529,12 +529,15 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
        int err, i = 0;
        char jack_name[NAME_SIZE];
 
-       pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
-                              head);
-       component = pcm->codec_dai->component;
+       if (list_empty(&ctx->hdmi_pcm_list))
+               return -EINVAL;
 
-       if (ctx->common_hdmi_codec_drv)
+       if (ctx->common_hdmi_codec_drv) {
+               pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+                                      head);
+               component = pcm->codec_dai->component;
                return hda_dsp_hdmi_build_controls(card, component);
+       }
 
        list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
                component = pcm->codec_dai->component;
@@ -555,9 +558,6 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
                i++;
        }
 
-       if (!component)
-               return -EINVAL;
-
        return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
 
index d6efc55..dd80d01 100644 (file)
@@ -241,12 +241,15 @@ static int sof_card_late_probe(struct snd_soc_card *card)
        struct hdmi_pcm *pcm;
        int ret, i = 0;
 
-       pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
-                              head);
-       component = pcm->codec_dai->component;
+       if (list_empty(&ctx->hdmi_pcm_list))
+               return -EINVAL;
 
-       if (ctx->common_hdmi_codec_drv)
+       if (ctx->common_hdmi_codec_drv) {
+               pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
+                                      head);
+               component = pcm->codec_dai->component;
                return hda_dsp_hdmi_build_controls(card, component);
+       }
 
        list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
                component = pcm->codec_dai->component;
@@ -265,8 +268,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
 
                i++;
        }
-       if (!component)
-               return -EINVAL;
 
        return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
index 4a6d117..8e947ba 100644 (file)
@@ -534,15 +534,18 @@ static int glk_card_late_probe(struct snd_soc_card *card)
        struct snd_soc_component *component = NULL;
        char jack_name[NAME_SIZE];
        struct glk_hdmi_pcm *pcm;
-       int err = 0;
+       int err;
        int i = 0;
 
-       pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
-                              head);
-       component = pcm->codec_dai->component;
+       if (list_empty(&ctx->hdmi_pcm_list))
+               return -EINVAL;
 
-       if (ctx->common_hdmi_codec_drv)
+       if (ctx->common_hdmi_codec_drv) {
+               pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
+                                      head);
+               component = pcm->codec_dai->component;
                return hda_dsp_hdmi_build_controls(card, component);
+       }
 
        list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
                component = pcm->codec_dai->component;
@@ -563,9 +566,6 @@ static int glk_card_late_probe(struct snd_soc_card *card)
                i++;
        }
 
-       if (!component)
-               return -EINVAL;
-
        return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
 
index 8a13231..5d87887 100644 (file)
@@ -273,19 +273,22 @@ static int sof_card_late_probe(struct snd_soc_card *card)
        struct snd_soc_component *component = NULL;
        char jack_name[NAME_SIZE];
        struct sof_hdmi_pcm *pcm;
-       int err = 0;
+       int err;
        int i = 0;
 
        /* HDMI is not supported by SOF on Baytrail/CherryTrail */
        if (is_legacy_cpu)
                return 0;
 
-       pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
-                              head);
-       component = pcm->codec_dai->component;
+       if (list_empty(&ctx->hdmi_pcm_list))
+               return -EINVAL;
 
-       if (ctx->common_hdmi_codec_drv)
+       if (ctx->common_hdmi_codec_drv) {
+               pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
+                                      head);
+               component = pcm->codec_dai->component;
                return hda_dsp_hdmi_build_controls(card, component);
+       }
 
        list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
                component = pcm->codec_dai->component;
@@ -305,8 +308,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
 
                i++;
        }
-       if (!component)
-               return -EINVAL;
 
        return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
index 67e9da4..7527330 100644 (file)
@@ -59,7 +59,7 @@ static const u64 rt1308_2_adr[] = {
 };
 
 static const u64 rt715_3_adr[] = {
-       0x000310025D715000
+       0x000310025D071500
 };
 
 static const struct snd_soc_acpi_link_adr icl_3_in_1_default[] = {
index 44f9c04..34cefba 100644 (file)
@@ -224,12 +224,12 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: failed to register DSP DAI driver %d\n", ret);
-               goto fw_run_err;
+               goto fw_trace_err;
        }
 
        ret = snd_sof_machine_register(sdev, plat_data);
        if (ret < 0)
-               goto fw_run_err;
+               goto fw_trace_err;
 
        /*
         * Some platforms in SOF, ex: BYT, may not have their platform PM
@@ -244,7 +244,8 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
 
        return 0;
 
-#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+fw_trace_err:
+       snd_sof_free_trace(sdev);
 fw_run_err:
        snd_sof_fw_unload(sdev);
 fw_load_err:
@@ -253,21 +254,10 @@ ipc_err:
        snd_sof_free_debug(sdev);
 dbg_err:
        snd_sof_remove(sdev);
-#else
-
-       /*
-        * when the probe_continue is handled in a work queue, the
-        * probe does not fail so we don't release resources here.
-        * They will be released with an explicit call to
-        * snd_sof_device_remove() when the PCI/ACPI device is removed
-        */
-
-fw_run_err:
-fw_load_err:
-ipc_err:
-dbg_err:
 
-#endif
+       /* all resources freed, update state to match */
+       sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+       sdev->first_boot = true;
 
        return ret;
 }
@@ -350,10 +340,12 @@ int snd_sof_device_remove(struct device *dev)
        if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
                cancel_work_sync(&sdev->probe_work);
 
-       snd_sof_fw_unload(sdev);
-       snd_sof_ipc_free(sdev);
-       snd_sof_free_debug(sdev);
-       snd_sof_free_trace(sdev);
+       if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) {
+               snd_sof_fw_unload(sdev);
+               snd_sof_ipc_free(sdev);
+               snd_sof_free_debug(sdev);
+               snd_sof_free_trace(sdev);
+       }
 
        /*
         * Unregister machine driver. This will unbind the snd_card which
@@ -361,13 +353,15 @@ int snd_sof_device_remove(struct device *dev)
         * before freeing the snd_card.
         */
        snd_sof_machine_unregister(sdev, pdata);
+
        /*
         * Unregistering the machine driver results in unloading the topology.
         * Some widgets, ex: scheduler, attempt to power down the core they are
         * scheduled on, when they are unloaded. Therefore, the DSP must be
         * removed only after the topology has been unloaded.
         */
-       snd_sof_remove(sdev);
+       if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED)
+               snd_sof_remove(sdev);
 
        /* release firmware */
        release_firmware(pdata->fw);
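
The core.c changes above restore a real unwind path in sof_probe_continue() (including the new fw_trace_err label that frees the DMA trace buffers) and make snd_sof_device_remove() tear down only what was actually brought up. A generic sketch of the reverse-order goto-unwind idiom those labels follow, with placeholder init/teardown helpers rather than the SOF functions:

/*
 * Sketch of the goto-unwind idiom: each failure jumps to the label that
 * undoes everything initialised so far, in reverse order. The step_*()
 * and undo_*() helpers are placeholders for the real init/teardown pairs.
 */
struct my_dev;
int step_a(struct my_dev *d);
int step_b(struct my_dev *d);
int step_c(struct my_dev *d);
void undo_a(struct my_dev *d);
void undo_b(struct my_dev *d);

static int probe_pipeline(struct my_dev *d)
{
	int ret;

	ret = step_a(d);
	if (ret < 0)
		return ret;

	ret = step_b(d);
	if (ret < 0)
		goto err_a;

	ret = step_c(d);
	if (ret < 0)
		goto err_b;

	return 0;

err_b:
	undo_b(d);
err_a:
	undo_a(d);
	return ret;
}
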
index 78dfd5f..9106ab8 100644 (file)
@@ -170,23 +170,14 @@ EXPORT_SYMBOL_NS(hda_codec_probe_bus, SND_SOC_SOF_HDA_AUDIO_CODEC);
 #if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
        IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
 
-void hda_codec_i915_get(struct snd_sof_dev *sdev)
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable)
 {
        struct hdac_bus *bus = sof_to_bus(sdev);
 
-       dev_dbg(bus->dev, "Turning i915 HDAC power on\n");
-       snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
+       dev_dbg(bus->dev, "Turning i915 HDAC power %d\n", enable);
+       snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, enable);
 }
-EXPORT_SYMBOL_NS(hda_codec_i915_get, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
-
-void hda_codec_i915_put(struct snd_sof_dev *sdev)
-{
-       struct hdac_bus *bus = sof_to_bus(sdev);
-
-       dev_dbg(bus->dev, "Turning i915 HDAC power off\n");
-       snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
-}
-EXPORT_SYMBOL_NS(hda_codec_i915_put, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+EXPORT_SYMBOL_NS(hda_codec_i915_display_power, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
 
 int hda_codec_i915_init(struct snd_sof_dev *sdev)
 {
@@ -198,7 +189,7 @@ int hda_codec_i915_init(struct snd_sof_dev *sdev)
        if (ret < 0)
                return ret;
 
-       hda_codec_i915_get(sdev);
+       hda_codec_i915_display_power(sdev, true);
 
        return 0;
 }
@@ -209,7 +200,7 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
        struct hdac_bus *bus = sof_to_bus(sdev);
        int ret;
 
-       hda_codec_i915_put(sdev);
+       hda_codec_i915_display_power(sdev, false);
 
        ret = snd_hdac_i915_exit(bus);
 
index d08462f..65b86dd 100644 (file)
@@ -380,7 +380,8 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
        /* create codec instances */
        hda_codec_probe_bus(sdev, hda_codec_use_common_hdmi);
 
-       hda_codec_i915_put(sdev);
+       if (!HDA_IDISP_CODEC(bus->codec_mask))
+               hda_codec_i915_display_power(sdev, false);
 
        /*
         * we are done probing so decrement link counts
index a4d030b..6191d91 100644 (file)
@@ -586,15 +586,14 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev);
        (IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
         IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
 
-void hda_codec_i915_get(struct snd_sof_dev *sdev);
-void hda_codec_i915_put(struct snd_sof_dev *sdev);
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable);
 int hda_codec_i915_init(struct snd_sof_dev *sdev);
 int hda_codec_i915_exit(struct snd_sof_dev *sdev);
 
 #else
 
-static inline void hda_codec_i915_get(struct snd_sof_dev *sdev)  { }
-static inline void hda_codec_i915_put(struct snd_sof_dev *sdev)  { }
+static inline void hda_codec_i915_display_power(struct snd_sof_dev *sdev,
+                                               bool enable) { }
 static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
 static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
 
index 314f309..29435ba 100644 (file)
@@ -591,6 +591,11 @@ static int sof_pcm_new(struct snd_soc_component *component,
                "spcm: allocate %s playback DMA buffer size 0x%x max 0x%x\n",
                caps->name, caps->buffer_size_min, caps->buffer_size_max);
 
+       if (!pcm->streams[stream].substream) {
+               dev_err(component->dev, "error: NULL playback substream!\n");
+               return -EINVAL;
+       }
+
        snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
                                   SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
                                   le32_to_cpu(caps->buffer_size_min),
@@ -609,6 +614,11 @@ capture:
                "spcm: allocate %s capture DMA buffer size 0x%x max 0x%x\n",
                caps->name, caps->buffer_size_min, caps->buffer_size_max);
 
+       if (!pcm->streams[stream].substream) {
+               dev_err(component->dev, "error: NULL capture substream!\n");
+               return -EINVAL;
+       }
+
        snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
                                   SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
                                   le32_to_cpu(caps->buffer_size_min),
index 84290bb..a0cde05 100644 (file)
@@ -56,6 +56,10 @@ static int sof_resume(struct device *dev, bool runtime_resume)
        if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume)
                return 0;
 
+       /* DSP was never successfully started, nothing to resume */
+       if (sdev->first_boot)
+               return 0;
+
        /*
         * if the runtime_resume flag is set, call the runtime_resume routine
         * or else call the system resume routine
index d855bc2..cec631a 100644 (file)
@@ -235,6 +235,7 @@ static const struct sof_dev_desc jsl_desc = {
        .chip_info = &jsl_chip_info,
        .default_fw_path = "intel/sof",
        .default_tplg_path = "intel/sof-tplg",
+       .default_fw_filename = "sof-jsl.ri",
        .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
        .ops = &sof_cnl_ops,
 };
@@ -416,6 +417,8 @@ static const struct pci_device_id sof_pci_ids[] = {
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
        { PCI_DEVICE(0x8086, 0x38c8),
                .driver_data = (unsigned long)&jsl_desc},
+       { PCI_DEVICE(0x8086, 0x4dc8),
+               .driver_data = (unsigned long)&jsl_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
        { PCI_DEVICE(0x8086, 0x02c8),
index 4bb6503..d815090 100644 (file)
@@ -343,7 +343,10 @@ void snd_sof_free_trace(struct snd_sof_dev *sdev)
 
        snd_sof_release_trace(sdev);
 
-       snd_dma_free_pages(&sdev->dmatb);
-       snd_dma_free_pages(&sdev->dmatp);
+       if (sdev->dma_trace_pages) {
+               snd_dma_free_pages(&sdev->dmatb);
+               snd_dma_free_pages(&sdev->dmatp);
+               sdev->dma_trace_pages = 0;
+       }
 }
 EXPORT_SYMBOL(snd_sof_free_trace);
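
With this change snd_sof_free_trace() is safe to call even when the trace buffers were never allocated, or twice in a row, because the free is keyed on dma_trace_pages and the counter is cleared afterwards. A generic sketch of that idempotent-cleanup pattern, with placeholder names:

/*
 * Sketch: make a cleanup routine idempotent by keying the free on the
 * allocation bookkeeping and resetting it afterwards. struct my_ctx and
 * release_buffer() are placeholders.
 */
void release_buffer(void *buf);

struct my_ctx {
	void *buf;
	int pages;	/* non-zero only while buf is allocated */
};

static void my_ctx_free(struct my_ctx *ctx)
{
	if (!ctx->pages)
		return;		/* never allocated, or already freed */

	release_buffer(ctx->buf);
	ctx->buf = NULL;
	ctx->pages = 0;	/* a second call becomes a no-op */
}
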
index cbe598b..98a9fe6 100644 (file)
@@ -555,7 +555,7 @@ static int sun4i_spdif_probe(struct platform_device *pdev)
        if (quirks->has_reset) {
                host->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
                                                                      NULL);
-               if (IS_ERR(host->rst) && PTR_ERR(host->rst) == -EPROBE_DEFER) {
+               if (PTR_ERR(host->rst) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
                        dev_err(&pdev->dev, "Failed to get reset: %d\n", ret);
                        return ret;
index dbed3c5..d59882e 100644 (file)
@@ -127,7 +127,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
        struct device *dev = dai->dev;
        struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
        unsigned int mask, val, reg;
-       int ret, sample_size, srate, i2sclock, bitcnt, audio_bits;
+       int ret, sample_size, srate, i2sclock, bitcnt;
        struct tegra30_ahub_cif_conf cif_conf;
 
        if (params_channels(params) != 2)
@@ -137,19 +137,8 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
                val = TEGRA30_I2S_CTRL_BIT_SIZE_16;
-               audio_bits = TEGRA30_AUDIOCIF_BITS_16;
                sample_size = 16;
                break;
-       case SNDRV_PCM_FORMAT_S24_LE:
-               val = TEGRA30_I2S_CTRL_BIT_SIZE_24;
-               audio_bits = TEGRA30_AUDIOCIF_BITS_24;
-               sample_size = 24;
-               break;
-       case SNDRV_PCM_FORMAT_S32_LE:
-               val = TEGRA30_I2S_CTRL_BIT_SIZE_32;
-               audio_bits = TEGRA30_AUDIOCIF_BITS_32;
-               sample_size = 32;
-               break;
        default:
                return -EINVAL;
        }
@@ -181,8 +170,8 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
        cif_conf.threshold = 0;
        cif_conf.audio_channels = 2;
        cif_conf.client_channels = 2;
-       cif_conf.audio_bits = audio_bits;
-       cif_conf.client_bits = audio_bits;
+       cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+       cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
        cif_conf.expand = 0;
        cif_conf.stereo_conv = 0;
        cif_conf.replicate = 0;
@@ -317,18 +306,14 @@ static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_96000,
-               .formats = SNDRV_PCM_FMTBIT_S32_LE |
-                          SNDRV_PCM_FMTBIT_S24_LE |
-                          SNDRV_PCM_FMTBIT_S16_LE,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
        .capture = {
                .stream_name = "Capture",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_96000,
-               .formats = SNDRV_PCM_FMTBIT_S32_LE |
-                          SNDRV_PCM_FMTBIT_S24_LE |
-                          SNDRV_PCM_FMTBIT_S16_LE,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
        .ops = &tegra30_i2s_dai_ops,
        .symmetric_rates = 1,
index 94b903d..74c00c9 100644 (file)
@@ -558,11 +558,11 @@ static const struct scarlett2_config
 
 /* proprietary request/response format */
 struct scarlett2_usb_packet {
-       u32 cmd;
-       u16 size;
-       u16 seq;
-       u32 error;
-       u32 pad;
+       __le32 cmd;
+       __le16 size;
+       __le16 seq;
+       __le32 error;
+       __le32 pad;
        u8 data[];
 };
 
@@ -664,11 +664,11 @@ static int scarlett2_usb(
                        "Scarlett Gen 2 USB invalid response; "
                           "cmd tx/rx %d/%d seq %d/%d size %d/%d "
                           "error %d pad %d\n",
-                       le16_to_cpu(req->cmd), le16_to_cpu(resp->cmd),
+                       le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
                        le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
                        resp_size, le16_to_cpu(resp->size),
-                       le16_to_cpu(resp->error),
-                       le16_to_cpu(resp->pad));
+                       le32_to_cpu(resp->error),
+                       le32_to_cpu(resp->pad));
                err = -EINVAL;
                goto unlock;
        }
@@ -687,7 +687,7 @@ error:
 /* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */
 static void scarlett2_config_save(struct usb_mixer_interface *mixer)
 {
-       u32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
+       __le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
 
        scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
                      &req, sizeof(u32),
@@ -713,11 +713,11 @@ static int scarlett2_usb_set_config(
        const struct scarlett2_config config_item =
               scarlett2_config_items[config_item_num];
        struct {
-               u32 offset;
-               u32 bytes;
-               s32 value;
+               __le32 offset;
+               __le32 bytes;
+               __le32 value;
        } __packed req;
-       u32 req2;
+       __le32 req2;
        int err;
        struct scarlett2_mixer_data *private = mixer->private_data;
 
@@ -753,8 +753,8 @@ static int scarlett2_usb_get(
        int offset, void *buf, int size)
 {
        struct {
-               u32 offset;
-               u32 size;
+               __le32 offset;
+               __le32 size;
        } __packed req;
 
        req.offset = cpu_to_le32(offset);
@@ -794,8 +794,8 @@ static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
        const struct scarlett2_device_info *info = private->info;
 
        struct {
-               u16 mix_num;
-               u16 data[SCARLETT2_INPUT_MIX_MAX];
+               __le16 mix_num;
+               __le16 data[SCARLETT2_INPUT_MIX_MAX];
        } __packed req;
 
        int i, j;
@@ -850,9 +850,9 @@ static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
        };
 
        struct {
-               u16 pad;
-               u16 num;
-               u32 data[SCARLETT2_MUX_MAX];
+               __le16 pad;
+               __le16 num;
+               __le32 data[SCARLETT2_MUX_MAX];
        } __packed req;
 
        req.pad = 0;
@@ -911,9 +911,9 @@ static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
                                          u16 *levels)
 {
        struct {
-               u16 pad;
-               u16 num_meters;
-               u32 magic;
+               __le16 pad;
+               __le16 num_meters;
+               __le32 magic;
        } __packed req;
        u32 resp[SCARLETT2_NUM_METERS];
        int i, err;
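
The Scarlett Gen 2 mixer fix marks every field of the proprietary USB packet as __le32/__le16 and pairs it with the matching cpu_to_le*()/le*_to_cpu() conversions (the usb/validate.c hunk below applies the same rule to wProcessType), since the wire format is little-endian regardless of host byte order. A minimal sketch of declaring and filling such a wire structure, with illustrative field names:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Sketch: wire-format struct for a little-endian USB protocol. The
 * point is the __le types paired with cpu_to_le*() on the way out and
 * le*_to_cpu() on the way in.
 */
struct wire_req {
	__le32 cmd;
	__le16 size;
	__le16 seq;
} __packed;

static void fill_req(struct wire_req *req, u32 cmd, u16 size, u16 seq)
{
	req->cmd  = cpu_to_le32(cmd);	/* host to wire */
	req->size = cpu_to_le16(size);
	req->seq  = cpu_to_le16(seq);
}

static u32 req_cmd(const struct wire_req *req)
{
	return le32_to_cpu(req->cmd);	/* wire to host */
}
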
index 4034c20..6fe206f 100644 (file)
@@ -110,7 +110,7 @@ static bool validate_processing_unit(const void *p,
        default:
                if (v->type == UAC1_EXTENSION_UNIT)
                        return true; /* OK */
-               switch (d->wProcessType) {
+               switch (le16_to_cpu(d->wProcessType)) {
                case UAC_PROCESS_UP_DOWNMIX:
                case UAC_PROCESS_DOLBY_PROLOGIC:
                        if (d->bLength < len + 1) /* bNrModes */
@@ -125,7 +125,7 @@ static bool validate_processing_unit(const void *p,
        case UAC_VERSION_2:
                if (v->type == UAC2_EXTENSION_UNIT_V2)
                        return true; /* OK */
-               switch (d->wProcessType) {
+               switch (le16_to_cpu(d->wProcessType)) {
                case UAC2_PROCESS_UP_DOWNMIX:
                case UAC2_PROCESS_DOLBY_PROLOCIC: /* SiC! */
                        if (d->bLength < len + 1) /* bNrModes */
@@ -142,7 +142,7 @@ static bool validate_processing_unit(const void *p,
                        len += 2; /* wClusterDescrID */
                        break;
                }
-               switch (d->wProcessType) {
+               switch (le16_to_cpu(d->wProcessType)) {
                case UAC3_PROCESS_UP_DOWNMIX:
                        if (d->bLength < len + 1) /* bNrModes */
                                return false;
index 7e42f7b..bd77881 100644 (file)
@@ -28,6 +28,7 @@ help:
        @echo '  pci                    - PCI tools'
        @echo '  perf                   - Linux performance measurement and analysis tool'
        @echo '  selftests              - various kernel selftests'
+       @echo '  bootconfig             - boot config tool'
        @echo '  spi                    - spi tools'
        @echo '  tmon                   - thermal monitoring and tuning tool'
        @echo '  turbostat              - Intel CPU idle stats and freq reporting tool'
@@ -63,7 +64,7 @@ acpi: FORCE
 cpupower: FORCE
        $(call descend,power/$@)
 
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
+cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
        $(call descend,$@)
 
 liblockdep: FORCE
@@ -96,7 +97,7 @@ kvm_stat: FORCE
        $(call descend,kvm/$@)
 
 all: acpi cgroup cpupower gpio hv firewire liblockdep \
-               perf selftests spi turbostat usb \
+               perf selftests bootconfig spi turbostat usb \
                virtio vm bpf x86_energy_perf_policy \
                tmon freefall iio objtool kvm_stat wmi \
                pci debugging
@@ -107,7 +108,7 @@ acpi_install:
 cpupower_install:
        $(call descend,power/$(@:_install=),install)
 
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
        $(call descend,$(@:_install=),install)
 
 liblockdep_install:
@@ -141,7 +142,7 @@ acpi_clean:
 cpupower_clean:
        $(call descend,power/cpupower,clean)
 
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
+cgroup_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
        $(call descend,$(@:_clean=),clean)
 
 liblockdep_clean:
@@ -176,7 +177,7 @@ build_clean:
        $(call descend,build,clean)
 
 clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
-               perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
+               perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
                vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
                freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
                gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
diff --git a/tools/bootconfig/.gitignore b/tools/bootconfig/.gitignore
new file mode 100644 (file)
index 0000000..e7644df
--- /dev/null
@@ -0,0 +1 @@
+bootconfig
diff --git a/tools/bootconfig/Makefile b/tools/bootconfig/Makefile
new file mode 100644 (file)
index 0000000..a6146ac
--- /dev/null
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for bootconfig command
+
+bindir ?= /usr/bin
+
+HEADER = include/linux/bootconfig.h
+CFLAGS = -Wall -g -I./include
+
+PROGS = bootconfig
+
+all: $(PROGS)
+
+bootconfig: ../../lib/bootconfig.c main.c $(HEADER)
+       $(CC) $(filter %.c,$^) $(CFLAGS) -o $@
+
+install: $(PROGS)
+       install bootconfig $(DESTDIR)$(bindir)
+
+test: bootconfig
+       ./test-bootconfig.sh
+
+clean:
+       $(RM) -f *.o bootconfig
diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
new file mode 100644 (file)
index 0000000..078cbd2
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BOOTCONFIG_LINUX_BOOTCONFIG_H
+#define _BOOTCONFIG_LINUX_BOOTCONFIG_H
+
+#include "../../../../include/linux/bootconfig.h"
+
+#endif
diff --git a/tools/bootconfig/include/linux/bug.h b/tools/bootconfig/include/linux/bug.h
new file mode 100644 (file)
index 0000000..7b65a38
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_BUG_H
+#define _SKC_LINUX_BUG_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define WARN_ON(cond)  \
+       ((cond) ? printf("Internal warning(%s:%d, %s): %s\n",   \
+                       __FILE__, __LINE__, __func__, #cond) : 0)
+
+#endif
diff --git a/tools/bootconfig/include/linux/ctype.h b/tools/bootconfig/include/linux/ctype.h
new file mode 100644 (file)
index 0000000..c56ecc1
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_CTYPE_H
+#define _SKC_LINUX_CTYPE_H
+
+#include <ctype.h>
+
+#endif
diff --git a/tools/bootconfig/include/linux/errno.h b/tools/bootconfig/include/linux/errno.h
new file mode 100644 (file)
index 0000000..5d9f91e
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_ERRNO_H
+#define _SKC_LINUX_ERRNO_H
+
+#include <asm/errno.h>
+
+#endif
diff --git a/tools/bootconfig/include/linux/kernel.h b/tools/bootconfig/include/linux/kernel.h
new file mode 100644 (file)
index 0000000..2d93320
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_KERNEL_H
+#define _SKC_LINUX_KERNEL_H
+
+#include <stdlib.h>
+#include <stdbool.h>
+
+#include <linux/printk.h>
+
+typedef unsigned short u16;
+typedef unsigned int   u32;
+
+#define unlikely(cond) (cond)
+
+#define __init
+#define __initdata
+
+#endif
diff --git a/tools/bootconfig/include/linux/printk.h b/tools/bootconfig/include/linux/printk.h
new file mode 100644 (file)
index 0000000..017bcd6
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_PRINTK_H
+#define _SKC_LINUX_PRINTK_H
+
+#include <stdio.h>
+
+/* controllable printf */
+extern int pr_output;
+#define printk(fmt, ...)       \
+       (pr_output ? printf(fmt, __VA_ARGS__) : 0)
+
+#define pr_err printk
+#define pr_warn        printk
+#define pr_info        printk
+#define pr_debug printk
+
+#endif
diff --git a/tools/bootconfig/include/linux/string.h b/tools/bootconfig/include/linux/string.h
new file mode 100644 (file)
index 0000000..8267af7
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_STRING_H
+#define _SKC_LINUX_STRING_H
+
+#include <string.h>
+
+/* Copied from lib/string.c */
+static inline char *skip_spaces(const char *str)
+{
+       while (isspace(*str))
+               ++str;
+       return (char *)str;
+}
+
+static inline char *strim(char *s)
+{
+       size_t size;
+       char *end;
+
+       size = strlen(s);
+       if (!size)
+               return s;
+
+       end = s + size - 1;
+       while (end >= s && isspace(*end))
+               end--;
+       *(end + 1) = '\0';
+
+       return skip_spaces(s);
+}
+
+#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
new file mode 100644 (file)
index 0000000..47f4884
--- /dev/null
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Boot config tool for initrd image
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include <linux/kernel.h>
+#include <linux/bootconfig.h>
+
+int pr_output = 1;
+
+static int xbc_show_array(struct xbc_node *node)
+{
+       const char *val;
+       int i = 0;
+
+       xbc_array_for_each_value(node, val) {
+               printf("\"%s\"%s", val, node->next ? ", " : ";\n");
+               i++;
+       }
+       return i;
+}
+
+static void xbc_show_compact_tree(void)
+{
+       struct xbc_node *node, *cnode;
+       int depth = 0, i;
+
+       node = xbc_root_node();
+       while (node && xbc_node_is_key(node)) {
+               for (i = 0; i < depth; i++)
+                       printf("\t");
+               cnode = xbc_node_get_child(node);
+               while (cnode && xbc_node_is_key(cnode) && !cnode->next) {
+                       printf("%s.", xbc_node_get_data(node));
+                       node = cnode;
+                       cnode = xbc_node_get_child(node);
+               }
+               if (cnode && xbc_node_is_key(cnode)) {
+                       printf("%s {\n", xbc_node_get_data(node));
+                       depth++;
+                       node = cnode;
+                       continue;
+               } else if (cnode && xbc_node_is_value(cnode)) {
+                       printf("%s = ", xbc_node_get_data(node));
+                       if (cnode->next)
+                               xbc_show_array(cnode);
+                       else
+                               printf("\"%s\";\n", xbc_node_get_data(cnode));
+               } else {
+                       printf("%s;\n", xbc_node_get_data(node));
+               }
+
+               if (node->next) {
+                       node = xbc_node_get_next(node);
+                       continue;
+               }
+               while (!node->next) {
+                       node = xbc_node_get_parent(node);
+                       if (!node)
+                               return;
+                       if (!xbc_node_get_child(node)->next)
+                               continue;
+                       depth--;
+                       for (i = 0; i < depth; i++)
+                               printf("\t");
+                       printf("}\n");
+               }
+               node = xbc_node_get_next(node);
+       }
+}
+
+/* Simple real checksum */
+int checksum(unsigned char *buf, int len)
+{
+       int i, sum = 0;
+
+       for (i = 0; i < len; i++)
+               sum += buf[i];
+
+       return sum;
+}
+
+#define PAGE_SIZE      4096
+
+int load_xbc_fd(int fd, char **buf, int size)
+{
+       int ret;
+
+       *buf = malloc(size + 1);
+       if (!*buf)
+               return -ENOMEM;
+
+       ret = read(fd, *buf, size);
+       if (ret < 0)
+               return -errno;
+       (*buf)[size] = '\0';
+
+       return ret;
+}
+
+/* Return the read size or -errno */
+int load_xbc_file(const char *path, char **buf)
+{
+       struct stat stat;
+       int fd, ret;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0)
+               return -errno;
+       ret = fstat(fd, &stat);
+       if (ret < 0)
+               return -errno;
+
+       ret = load_xbc_fd(fd, buf, stat.st_size);
+
+       close(fd);
+
+       return ret;
+}
+
+int load_xbc_from_initrd(int fd, char **buf)
+{
+       struct stat stat;
+       int ret;
+       u32 size = 0, csum = 0, rcsum;
+
+       ret = fstat(fd, &stat);
+       if (ret < 0)
+               return -errno;
+
+       if (stat.st_size < 8)
+               return 0;
+
+       if (lseek(fd, -8, SEEK_END) < 0) {
+               printf("Failed to lseek: %d\n", -errno);
+               return -errno;
+       }
+
+       if (read(fd, &size, sizeof(u32)) < 0)
+               return -errno;
+
+       if (read(fd, &csum, sizeof(u32)) < 0)
+               return -errno;
+
+       /* Wrong size, maybe no boot config here */
+       if (stat.st_size < size + 8)
+               return 0;
+
+       if (lseek(fd, stat.st_size - 8 - size, SEEK_SET) < 0) {
+               printf("Failed to lseek: %d\n", -errno);
+               return -errno;
+       }
+
+       ret = load_xbc_fd(fd, buf, size);
+       if (ret < 0)
+               return ret;
+
+       /* Wrong Checksum, maybe no boot config here */
+       rcsum = checksum((unsigned char *)*buf, size);
+       if (csum != rcsum) {
+               printf("checksum error: %d != %d\n", csum, rcsum);
+               return 0;
+       }
+
+       ret = xbc_init(*buf);
+       /* Wrong data, maybe no boot config here */
+       if (ret < 0)
+               return 0;
+
+       return size;
+}
+
+int show_xbc(const char *path)
+{
+       int ret, fd;
+       char *buf = NULL;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0) {
+               printf("Failed to open initrd %s: %d\n", path, fd);
+               return -errno;
+       }
+
+       ret = load_xbc_from_initrd(fd, &buf);
+       if (ret < 0)
+               printf("Failed to load a boot config from initrd: %d\n", ret);
+       else
+               xbc_show_compact_tree();
+
+       close(fd);
+       free(buf);
+
+       return ret;
+}
+
+int delete_xbc(const char *path)
+{
+       struct stat stat;
+       int ret = 0, fd, size;
+       char *buf = NULL;
+
+       fd = open(path, O_RDWR);
+       if (fd < 0) {
+               printf("Failed to open initrd %s: %d\n", path, fd);
+               return -errno;
+       }
+
+       /*
+        * Suppress error messages in xbc_init() because the file may just
+        * contain data that coincidentally matches the size and checksum footer.
+        */
+       pr_output = 0;
+       size = load_xbc_from_initrd(fd, &buf);
+       pr_output = 1;
+       if (size < 0) {
+               ret = size;
+               printf("Failed to load a boot config from initrd: %d\n", ret);
+       } else if (size > 0) {
+               ret = fstat(fd, &stat);
+               if (!ret)
+                       ret = ftruncate(fd, stat.st_size - size - 8);
+               if (ret)
+                       ret = -errno;
+       } /* Ignore if there is no boot config in initrd */
+
+       close(fd);
+       free(buf);
+
+       return ret;
+}
+
+int apply_xbc(const char *path, const char *xbc_path)
+{
+       u32 size, csum;
+       char *buf, *data;
+       int ret, fd;
+
+       ret = load_xbc_file(xbc_path, &buf);
+       if (ret < 0) {
+               printf("Failed to load %s : %d\n", xbc_path, ret);
+               return ret;
+       }
+       size = strlen(buf) + 1;
+       csum = checksum((unsigned char *)buf, size);
+
+       /* Prepare xbc_path data */
+       data = malloc(size + 8);
+       if (!data)
+               return -ENOMEM;
+       strcpy(data, buf);
+       *(u32 *)(data + size) = size;
+       *(u32 *)(data + size + 4) = csum;
+
+       /* Check the data format */
+       ret = xbc_init(buf);
+       if (ret < 0) {
+               printf("Failed to parse %s: %d\n", xbc_path, ret);
+               free(data);
+               free(buf);
+               return ret;
+       }
+       printf("Apply %s to %s\n", xbc_path, path);
+       printf("\tNumber of nodes: %d\n", ret);
+       printf("\tSize: %u bytes\n", (unsigned int)size);
+       printf("\tChecksum: %u\n", (unsigned int)csum);
+
+       /* TODO: Check the options by schema */
+       xbc_destroy_all();
+       free(buf);
+
+       /* Remove old boot config if exists */
+       ret = delete_xbc(path);
+       if (ret < 0) {
+               printf("Failed to delete previous boot config: %d\n", ret);
+               return ret;
+       }
+
+       /* Apply new one */
+       fd = open(path, O_RDWR | O_APPEND);
+       if (fd < 0) {
+               printf("Failed to open %s: %d\n", path, fd);
+               return fd;
+       }
+       /* TODO: Ensure the @path is initramfs/initrd image */
+       ret = write(fd, data, size + 8);
+       if (ret < 0) {
+               printf("Failed to apply a boot config: %d\n", ret);
+               return ret;
+       }
+       close(fd);
+       free(data);
+
+       return 0;
+}
+
+int usage(void)
+{
+       printf("Usage: bootconfig [OPTIONS] <INITRD>\n"
+               " Apply, delete or show boot config to initrd.\n"
+               " Options:\n"
+               "               -a <config>: Apply boot config to initrd\n"
+               "               -d : Delete boot config file from initrd\n\n"
+               " If no option is given, show current applied boot config.\n");
+       return -1;
+}
+
+int main(int argc, char **argv)
+{
+       char *path = NULL;
+       char *apply = NULL;
+       bool delete = false;
+       int opt;
+
+       while ((opt = getopt(argc, argv, "hda:")) != -1) {
+               switch (opt) {
+               case 'd':
+                       delete = true;
+                       break;
+               case 'a':
+                       apply = optarg;
+                       break;
+               case 'h':
+               default:
+                       return usage();
+               }
+       }
+
+       if (apply && delete) {
+               printf("Error: You can not specify both -a and -d at once.\n");
+               return usage();
+       }
+
+       if (optind >= argc) {
+               printf("Error: No initrd is specified.\n");
+               return usage();
+       }
+
+       path = argv[optind];
+
+       if (apply)
+               return apply_xbc(path, apply);
+       else if (delete)
+               return delete_xbc(path);
+
+       return show_xbc(path);
+}
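
apply_xbc() appends the config text (including its trailing NUL) followed by an 8-byte footer, a 4-byte size and a 4-byte byte-sum checksum, which load_xbc_from_initrd() later reads back from the end of the initrd to locate and verify the config. A small standalone userspace sketch that produces the same footer layout; the file name is only an example, and the values are written in host byte order just as the tool does:

/*
 * Sketch: append <config text + NUL> + 4-byte size + 4-byte checksum,
 * the footer layout this tool writes and later looks for at the end of
 * an initrd.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t byte_sum(const unsigned char *buf, uint32_t len)
{
	uint32_t i, sum = 0;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return sum;
}

int main(void)
{
	const char conf[] = "key = \"value\"\n";
	uint32_t size = sizeof(conf);		/* includes the trailing NUL */
	uint32_t csum = byte_sum((const unsigned char *)conf, size);
	FILE *f = fopen("initrd.img", "ab");

	if (!f)
		return 1;
	fwrite(conf, 1, size, f);
	fwrite(&size, sizeof(size), 1, f);	/* footer: size first ... */
	fwrite(&csum, sizeof(csum), 1, f);	/* ... then checksum */
	fclose(f);
	return 0;
}
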
diff --git a/tools/bootconfig/samples/bad-array-space-comment.bconf b/tools/bootconfig/samples/bad-array-space-comment.bconf
new file mode 100644 (file)
index 0000000..fda19e4
--- /dev/null
@@ -0,0 +1,5 @@
+key =  # comment
+       "value1",         # comment1
+       "value2"          # comment2
+,
+       "value3"
diff --git a/tools/bootconfig/samples/bad-array.bconf b/tools/bootconfig/samples/bad-array.bconf
new file mode 100644 (file)
index 0000000..0174af0
--- /dev/null
@@ -0,0 +1,2 @@
+# Array must be comma separated.
+key = "value1" "value2"
diff --git a/tools/bootconfig/samples/bad-dotword.bconf b/tools/bootconfig/samples/bad-dotword.bconf
new file mode 100644 (file)
index 0000000..ba5557b
--- /dev/null
@@ -0,0 +1,4 @@
+# do not start keyword with .
+key {
+  .word = 1
+}
diff --git a/tools/bootconfig/samples/bad-empty.bconf b/tools/bootconfig/samples/bad-empty.bconf
new file mode 100644 (file)
index 0000000..2ba3f6c
--- /dev/null
@@ -0,0 +1 @@
+# Wrong boot config: comment only
diff --git a/tools/bootconfig/samples/bad-keyerror.bconf b/tools/bootconfig/samples/bad-keyerror.bconf
new file mode 100644 (file)
index 0000000..b6e247a
--- /dev/null
@@ -0,0 +1,2 @@
+# key word can not contain ","
+key,word
diff --git a/tools/bootconfig/samples/bad-longkey.bconf b/tools/bootconfig/samples/bad-longkey.bconf
new file mode 100644 (file)
index 0000000..eb97369
--- /dev/null
@@ -0,0 +1 @@
+key_word_is_too_long01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
diff --git a/tools/bootconfig/samples/bad-manywords.bconf b/tools/bootconfig/samples/bad-manywords.bconf
new file mode 100644 (file)
index 0000000..8db8196
--- /dev/null
@@ -0,0 +1 @@
+key1.is2.too3.long4.5.6.7.8.9.10.11.12.13.14.15.16.17
diff --git a/tools/bootconfig/samples/bad-no-keyword.bconf b/tools/bootconfig/samples/bad-no-keyword.bconf
new file mode 100644 (file)
index 0000000..eff2680
--- /dev/null
@@ -0,0 +1,2 @@
+# No keyword
+{}
diff --git a/tools/bootconfig/samples/bad-nonprintable.bconf b/tools/bootconfig/samples/bad-nonprintable.bconf
new file mode 100644 (file)
index 0000000..3bb1a28
--- /dev/null
@@ -0,0 +1,2 @@
+# Non printable
+key = "\ 2"
diff --git a/tools/bootconfig/samples/bad-spaceword.bconf b/tools/bootconfig/samples/bad-spaceword.bconf
new file mode 100644 (file)
index 0000000..90c703d
--- /dev/null
@@ -0,0 +1,2 @@
+# No space between words
+key . word
diff --git a/tools/bootconfig/samples/bad-tree.bconf b/tools/bootconfig/samples/bad-tree.bconf
new file mode 100644 (file)
index 0000000..5a6038e
--- /dev/null
@@ -0,0 +1,5 @@
+# brace is not closing
+tree {
+  node {
+    value = 1
+}
diff --git a/tools/bootconfig/samples/bad-value.bconf b/tools/bootconfig/samples/bad-value.bconf
new file mode 100644 (file)
index 0000000..a1217fe
--- /dev/null
@@ -0,0 +1,3 @@
+# Quotes error
+value = "data
+
diff --git a/tools/bootconfig/samples/escaped.bconf b/tools/bootconfig/samples/escaped.bconf
new file mode 100644 (file)
index 0000000..9f72043
--- /dev/null
@@ -0,0 +1,3 @@
+key1 = "A\B\C"
+key2 = '\'\''
+key3 = "\\"
diff --git a/tools/bootconfig/samples/good-array-space-comment.bconf b/tools/bootconfig/samples/good-array-space-comment.bconf
new file mode 100644 (file)
index 0000000..45b938d
--- /dev/null
@@ -0,0 +1,4 @@
+key =  # comment
+       "value1",         # comment1
+       "value2"         , # comment2
+       "value3"
diff --git a/tools/bootconfig/samples/good-comment-after-value.bconf b/tools/bootconfig/samples/good-comment-after-value.bconf
new file mode 100644 (file)
index 0000000..0d92a85
--- /dev/null
@@ -0,0 +1 @@
+key = "value"  # comment
diff --git a/tools/bootconfig/samples/good-printables.bconf b/tools/bootconfig/samples/good-printables.bconf
new file mode 100644 (file)
index 0000000..ebb985a
--- /dev/null
@@ -0,0 +1,2 @@
+key = "        
+\v\f !#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
diff --git a/tools/bootconfig/samples/good-simple.bconf b/tools/bootconfig/samples/good-simple.bconf
new file mode 100644 (file)
index 0000000..37dd6d2
--- /dev/null
@@ -0,0 +1,11 @@
+# A good simple bootconfig
+
+key.word1 = 1
+key.word2=2
+key.word3 = 3;
+
+key {
+word4 = 4 }
+
+key { word5 = 5; word6 = 6 }
+
diff --git a/tools/bootconfig/samples/good-single.bconf b/tools/bootconfig/samples/good-single.bconf
new file mode 100644 (file)
index 0000000..98e55ad
--- /dev/null
@@ -0,0 +1,4 @@
+# single key style
+key = 1
+key2 = 2
+key3 = "alpha", "beta"
diff --git a/tools/bootconfig/samples/good-space-after-value.bconf b/tools/bootconfig/samples/good-space-after-value.bconf
new file mode 100644 (file)
index 0000000..56c15cb
--- /dev/null
@@ -0,0 +1 @@
+key = "value"   
diff --git a/tools/bootconfig/samples/good-tree.bconf b/tools/bootconfig/samples/good-tree.bconf
new file mode 100644 (file)
index 0000000..f2ddefc
--- /dev/null
@@ -0,0 +1,12 @@
+key {
+  word {
+    tree {
+      value = "0"}
+  }
+  word2 {
+    tree {
+      value = 1,2 }
+  }
+}
+other.tree {
+  value = 2; value2 = 3;}
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
new file mode 100755 (executable)
index 0000000..87725e8
--- /dev/null
@@ -0,0 +1,105 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+echo "Boot config test script"
+
+BOOTCONF=./bootconfig
+INITRD=`mktemp initrd-XXXX`
+TEMPCONF=`mktemp temp-XXXX.bconf`
+NG=0
+
+cleanup() {
+  rm -f $INITRD $TEMPCONF
+  exit $NG
+}
+
+trap cleanup EXIT TERM
+
+NO=1
+
+xpass() { # pass test command
+  echo "test case $NO ($3)... "
+  if ! ($@ && echo "\t\t[OK]"); then
+     echo "\t\t[NG]"; NG=$((NG + 1))
+  fi
+  NO=$((NO + 1))
+}
+
+xfail() { # fail test command
+  echo "test case $NO ($3)... "
+  if ! (! $@ && echo "\t\t[OK]"); then
+     echo "\t\t[NG]"; NG=$((NG + 1))
+  fi
+  NO=$((NO + 1))
+}
+
+echo "Basic command test"
+xpass $BOOTCONF $INITRD
+
+echo "Delete command should success without bootconfig"
+xpass $BOOTCONF -d $INITRD
+
+dd if=/dev/zero of=$INITRD bs=4096 count=1
+echo "key = value;" > $TEMPCONF
+bconf_size=$(stat -c %s $TEMPCONF)
+initrd_size=$(stat -c %s $INITRD)
+
+echo "Apply command test"
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+new_size=$(stat -c %s $INITRD)
+
+echo "File size check"
+xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9)
+
+echo "Apply command repeat test"
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "File size check"
+xpass test $new_size -eq $(stat -c %s $INITRD)
+
+echo "Delete command check"
+xpass $BOOTCONF -d $INITRD
+
+echo "File size check"
+new_size=$(stat -c %s $INITRD)
+xpass test $new_size -eq $initrd_size
+
+echo "Max node number check"
+
+echo -n > $TEMPCONF
+for i in `seq 1 1024` ; do
+   echo "node$i" >> $TEMPCONF
+done
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "badnode" >> $TEMPCONF
+xfail $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "Max filesize check"
+
+# Max size is 32767 (including terminal byte)
+echo -n "data = \"" > $TEMPCONF
+dd if=/dev/urandom bs=768 count=32 | base64 -w0 >> $TEMPCONF
+echo "\"" >> $TEMPCONF
+xfail $BOOTCONF -a $TEMPCONF $INITRD
+
+truncate -s 32764 $TEMPCONF
+echo "\"" >> $TEMPCONF # add 2 bytes + terminal ('\"\n\0')
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "=== expected failure cases ==="
+for i in samples/bad-* ; do
+  xfail $BOOTCONF -a $i $INITRD
+done
+
+echo "=== expected success cases ==="
+for i in samples/good-* ; do
+  xpass $BOOTCONF -a $i $INITRD
+done
+
+echo
+if [ $NG -eq 0 ]; then
+       echo "All tests passed"
+else
+       echo "$NG tests failed"
+fi
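
For quick reference, the tool invocations exercised by the test script above follow this pattern (a hedged sketch; the tool path and the initrd/config file names are placeholders, not taken from the patch):

  ./bootconfig initrd.img                  # check/show the bootconfig attached to an initrd
  ./bootconfig -a extra.bconf initrd.img   # apply: append a bootconfig file plus a small footer
  ./bootconfig -d initrd.img               # delete the attached bootconfig again
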
index 140c836..5fca38f 100644 (file)
 #include <linux/bits.h>
 #include <linux/compiler.h>
 
-#define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#define BITS_TO_U64(nr)                DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
-#define BITS_TO_U32(nr)                DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
-#define BITS_TO_BYTES(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE)
+#define BITS_PER_TYPE(type)    (sizeof(type) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr)                DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr)                DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr)      DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
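
As a standalone sketch of what the reworked helpers above compute (not part of the patch; BITS_PER_BYTE and DIV_ROUND_UP are re-created here so the example builds on its own, and the u64/u32 typedefs are replaced with stdint types):

	#include <stdio.h>
	#include <stdint.h>

	/* Local re-creations of the helpers touched in the hunk above, for illustration only. */
	#define BITS_PER_BYTE		8
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
	#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
	#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(uint32_t))
	#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

	int main(void)
	{
		/* On an LP64 host, 65 bits need two longs, three u32s and nine bytes. */
		printf("%zu %zu %zu\n",
		       (size_t)BITS_TO_LONGS(65),
		       (size_t)BITS_TO_U32(65),
		       (size_t)BITS_TO_BYTES(65));
		return 0;
	}
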
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc
new file mode 100644 (file)
index 0000000..d44087a
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test histogram parser errors
+
+if [ ! -f set_event -o ! -d events/kmem ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/kmem/kmalloc/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/kmem/kmalloc/hist ]; then
+    echo "hist trigger is not supported"
+    exit_unsupported
+fi
+
+[ -f error_log ] || exit_unsupported
+
+check_error() { # command-with-error-pos-by-^
+    ftrace_errlog_check 'hist:kmem:kmalloc' "$1" 'events/kmem/kmalloc/trigger'
+}
+
+check_error 'hist:keys=common_pid:vals=bytes_req:sort=common_pid,^junk'        # INVALID_SORT_FIELD
+check_error 'hist:keys=common_pid:vals=bytes_req:^sort='               # EMPTY_ASSIGNMENT
+check_error 'hist:keys=common_pid:vals=bytes_req:^sort=common_pid,'    # EMPTY_SORT_FIELD
+check_error 'hist:keys=common_pid:vals=bytes_req:sort=common_pid.^junk'        # INVALID_SORT_MODIFIER
+check_error 'hist:keys=common_pid:vals=bytes_req,bytes_alloc:^sort=common_pid,bytes_req,bytes_alloc'   # TOO_MANY_SORT_FIELDS
+
+exit 0
index 8aefd81..ecc52d4 100644 (file)
@@ -22,3 +22,4 @@ ipv6_flowlabel_mgr
 so_txtime
 tcp_fastopen_backup_key
 nettest
+fin_ack_lat
index a8e04d6..b569419 100644 (file)
@@ -11,6 +11,7 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
 TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
 TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
 TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
+TEST_PROGS += fin_ack_lat.sh
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
@@ -18,6 +19,7 @@ TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
 TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
 TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr
 TEST_GEN_FILES += tcp_fastopen_backup_key
+TEST_GEN_FILES += fin_ack_lat
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
 TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
 
diff --git a/tools/testing/selftests/net/fin_ack_lat.c b/tools/testing/selftests/net/fin_ack_lat.c
new file mode 100644 (file)
index 0000000..7018749
--- /dev/null
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+static int child_pid;
+
+static unsigned long timediff(struct timeval s, struct timeval e)
+{
+       unsigned long s_us, e_us;
+
+       s_us = s.tv_sec * 1000000 + s.tv_usec;
+       e_us = e.tv_sec * 1000000 + e.tv_usec;
+       if (s_us > e_us)
+               return 0;
+       return e_us - s_us;
+}
+
+static void client(int port)
+{
+       int sock = 0;
+       struct sockaddr_in addr, laddr;
+       socklen_t len = sizeof(laddr);
+       struct linger sl;
+       int flag = 1;
+       int buffer;
+       struct timeval start, end;
+       unsigned long lat, sum_lat = 0, nr_lat = 0;
+
+       while (1) {
+               gettimeofday(&start, NULL);
+
+               sock = socket(AF_INET, SOCK_STREAM, 0);
+               if (sock < 0)
+                       error(-1, errno, "socket creation");
+
+               sl.l_onoff = 1;
+               sl.l_linger = 0;
+               if (setsockopt(sock, SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)))
+                       error(-1, errno, "setsockopt(linger)");
+
+               if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
+                                       &flag, sizeof(flag)))
+                       error(-1, errno, "setsockopt(nodelay)");
+
+               addr.sin_family = AF_INET;
+               addr.sin_port = htons(port);
+
+               if (inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr) <= 0)
+                       error(-1, errno, "inet_pton");
+
+               if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+                       error(-1, errno, "connect");
+
+               send(sock, &buffer, sizeof(buffer), 0);
+               if (read(sock, &buffer, sizeof(buffer)) == -1)
+                       error(-1, errno, "waiting read");
+
+               gettimeofday(&end, NULL);
+               lat = timediff(start, end);
+               sum_lat += lat;
+               nr_lat++;
+               if (lat < 100000)
+                       goto close;
+
+               if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
+                       error(-1, errno, "getsockname");
+               printf("port: %d, lat: %lu, avg: %lu, nr: %lu\n",
+                               ntohs(laddr.sin_port), lat,
+                               sum_lat / nr_lat, nr_lat);
+close:
+               fflush(stdout);
+               close(sock);
+       }
+}
+
+static void server(int sock, struct sockaddr_in address)
+{
+       int accepted;
+       int addrlen = sizeof(address);
+       int buffer;
+
+       while (1) {
+               accepted = accept(sock, (struct sockaddr *)&address,
+                               (socklen_t *)&addrlen);
+               if (accepted < 0)
+                       error(-1, errno, "accept");
+
+               if (read(accepted, &buffer, sizeof(buffer)) == -1)
+                       error(-1, errno, "read");
+               close(accepted);
+       }
+}
+
+static void sig_handler(int signum)
+{
+	kill(child_pid, SIGTERM);
+       exit(0);
+}
+
+int main(int argc, char const *argv[])
+{
+       int sock;
+       int opt = 1;
+       struct sockaddr_in address;
+       struct sockaddr_in laddr;
+       socklen_t len = sizeof(laddr);
+
+       if (signal(SIGTERM, sig_handler) == SIG_ERR)
+               error(-1, errno, "signal");
+
+       sock = socket(AF_INET, SOCK_STREAM, 0);
+       if (sock < 0)
+               error(-1, errno, "socket");
+
+       if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT,
+                               &opt, sizeof(opt)) == -1)
+               error(-1, errno, "setsockopt");
+
+       address.sin_family = AF_INET;
+       address.sin_addr.s_addr = INADDR_ANY;
+       /* dynamically allocate unused port */
+       address.sin_port = 0;
+
+       if (bind(sock, (struct sockaddr *)&address, sizeof(address)) < 0)
+               error(-1, errno, "bind");
+
+       if (listen(sock, 3) < 0)
+               error(-1, errno, "listen");
+
+       if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
+               error(-1, errno, "getsockname");
+
+       fprintf(stderr, "server port: %d\n", ntohs(laddr.sin_port));
+       child_pid = fork();
+       if (!child_pid)
+               client(ntohs(laddr.sin_port));
+       else
+               server(sock, laddr);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/net/fin_ack_lat.sh b/tools/testing/selftests/net/fin_ack_lat.sh
new file mode 100755 (executable)
index 0000000..a3ff6e0
--- /dev/null
@@ -0,0 +1,35 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test latency spikes caused by FIN/ACK handling race.
+
+set +x
+set -e
+
+tmpfile=$(mktemp /tmp/fin_ack_latency.XXXX.log)
+
+cleanup() {
+       kill $(pidof fin_ack_lat)
+       rm -f $tmpfile
+}
+
+trap cleanup EXIT
+
+do_test() {
+       RUNTIME=$1
+
+       ./fin_ack_lat | tee $tmpfile &
+       PID=$!
+
+       sleep $RUNTIME
+       NR_SPIKES=$(wc -l $tmpfile | awk '{print $1}')
+       if [ $NR_SPIKES -gt 0 ]
+       then
+               echo "FAIL: $NR_SPIKES spikes detected"
+               return 1
+       fi
+       return 0
+}
+
+do_test "30"
+echo "test done"
index 26112ab..f52ed92 100755 (executable)
@@ -53,9 +53,13 @@ eeh_one_dev() {
        # is a no-op.
        echo $dev >/sys/kernel/debug/powerpc/eeh_dev_check
 
-       # Enforce a 30s timeout for recovery. Even the IPR, which is infamously
-       # slow to reset, should recover within 30s.
-       max_wait=30
+       # Default to a 60s timeout when waiting for a device to recover. This
+       # is an arbitrary default which can be overridden by setting the
+	# EEH_MAX_WAIT environment variable when required.
+
+       # The current record holder for longest recovery time is:
+       #  "Adaptec Series 8 12G SAS/PCIe 3" at 39 seconds
+       max_wait=${EEH_MAX_WAIT:=60}
 
        for i in `seq 0 ${max_wait}` ; do
                if pe_ok $dev ; then
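
A hypothetical invocation of the override introduced above (the harness script name is assumed; only the EEH_MAX_WAIT variable comes from this hunk):

  EEH_MAX_WAIT=120 ./eeh-basic.sh   # allow up to 120 seconds per device instead of the 60s default
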
index ed15658..b9103c4 100644 (file)
@@ -3,7 +3,7 @@ noarg:
        $(MAKE) -C ../
 
 TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
-                 large_vm_fork_separation
+                 large_vm_fork_separation bad_accesses
 TEST_GEN_PROGS_EXTENDED := tlbie_test
 TEST_GEN_FILES := tempfile
 
@@ -16,6 +16,7 @@ $(OUTPUT)/prot_sao: ../utils.c
 
 $(OUTPUT)/wild_bctr: CFLAGS += -m64
 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+$(OUTPUT)/bad_accesses: CFLAGS += -m64
 
 $(OUTPUT)/tempfile:
        dd if=/dev/zero of=$@ bs=64k count=1
diff --git a/tools/testing/selftests/powerpc/mm/bad_accesses.c b/tools/testing/selftests/powerpc/mm/bad_accesses.c
new file mode 100644 (file)
index 0000000..adc465f
--- /dev/null
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2019, Michael Ellerman, IBM Corp.
+//
+// Test that out-of-bounds reads/writes behave as expected.
+
+#include <setjmp.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+// Old distros (Ubuntu 16.04 at least) don't define this
+#ifndef SEGV_BNDERR
+#define SEGV_BNDERR    3
+#endif
+
+// 64-bit kernel is always here
+#define PAGE_OFFSET    (0xcul << 60)
+
+static unsigned long kernel_virt_end;
+
+static volatile int fault_code;
+static volatile unsigned long fault_addr;
+static jmp_buf setjmp_env;
+
+static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
+{
+       fault_code = info->si_code;
+       fault_addr = (unsigned long)info->si_addr;
+       siglongjmp(setjmp_env, 1);
+}
+
+int bad_access(char *p, bool write)
+{
+       char x;
+
+       fault_code = 0;
+       fault_addr = 0;
+
+       if (sigsetjmp(setjmp_env, 1) == 0) {
+               if (write)
+                       *p = 1;
+               else
+                       x = *p;
+
+               printf("Bad - no SEGV! (%c)\n", x);
+               return 1;
+       }
+
+       // If we see MAPERR that means we took a page fault rather than an SLB
+       // miss. We only expect to take page faults for addresses within the
+       // valid kernel range.
+       FAIL_IF(fault_code == SEGV_MAPERR && \
+               (fault_addr < PAGE_OFFSET || fault_addr >= kernel_virt_end));
+
+       FAIL_IF(fault_code != SEGV_MAPERR && fault_code != SEGV_BNDERR);
+
+       return 0;
+}
+
+static int using_hash_mmu(bool *using_hash)
+{
+       char line[128];
+       FILE *f;
+       int rc;
+
+       f = fopen("/proc/cpuinfo", "r");
+       FAIL_IF(!f);
+
+       rc = 0;
+       while (fgets(line, sizeof(line), f) != NULL) {
+               if (strcmp(line, "MMU           : Hash\n") == 0) {
+                       *using_hash = true;
+                       goto out;
+               }
+
+               if (strcmp(line, "MMU           : Radix\n") == 0) {
+                       *using_hash = false;
+                       goto out;
+               }
+       }
+
+       rc = -1;
+out:
+       fclose(f);
+       return rc;
+}
+
+static int test(void)
+{
+       unsigned long i, j, addr, region_shift, page_shift, page_size;
+       struct sigaction sig;
+       bool hash_mmu;
+
+       sig = (struct sigaction) {
+               .sa_sigaction = segv_handler,
+               .sa_flags = SA_SIGINFO,
+       };
+
+       FAIL_IF(sigaction(SIGSEGV, &sig, NULL) != 0);
+
+       FAIL_IF(using_hash_mmu(&hash_mmu));
+
+       page_size = sysconf(_SC_PAGESIZE);
+       if (page_size == (64 * 1024))
+               page_shift = 16;
+       else
+               page_shift = 12;
+
+       if (page_size == (64 * 1024) || !hash_mmu) {
+               region_shift = 52;
+
+               // We have 7 512T regions (4 kernel linear, vmalloc, io, vmemmap)
+               kernel_virt_end = PAGE_OFFSET + (7 * (512ul << 40));
+       } else if (page_size == (4 * 1024) && hash_mmu) {
+               region_shift = 46;
+
+               // We have 7 64T regions (4 kernel linear, vmalloc, io, vmemmap)
+               kernel_virt_end = PAGE_OFFSET + (7 * (64ul << 40));
+       } else
+               FAIL_IF(true);
+
+       printf("Using %s MMU, PAGE_SIZE = %dKB start address 0x%016lx\n",
+              hash_mmu ? "hash" : "radix",
+              (1 << page_shift) >> 10,
+              1ul << region_shift);
+
+       // This generates access patterns like:
+       //   0x0010000000000000
+       //   0x0010000000010000
+       //   0x0010000000020000
+       //   ...
+       //   0x0014000000000000
+       //   0x0018000000000000
+       //   0x0020000000000000
+       //   0x0020000000010000
+       //   0x0020000000020000
+       //   ...
+       //   0xf400000000000000
+       //   0xf800000000000000
+
+       for (i = 1; i <= ((0xful << 60) >> region_shift); i++) {
+               for (j = page_shift - 1; j < 60; j++) {
+                       unsigned long base, delta;
+
+                       base  = i << region_shift;
+                       delta = 1ul << j;
+
+                       if (delta >= base)
+                               break;
+
+                       addr = (base | delta) & ~((1 << page_shift) - 1);
+
+                       FAIL_IF(bad_access((char *)addr, false));
+                       FAIL_IF(bad_access((char *)addr, true));
+               }
+       }
+
+       return 0;
+}
+
+int main(void)
+{
+       return test_harness(test, "bad_accesses");
+}
index 7deedbc..fc477df 100644 (file)
@@ -455,9 +455,8 @@ run_tests(pid_t child_pid, struct ppc_debug_info *dbginfo, bool dawr)
        if (dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_RANGE) {
                test_sethwdebug_exact(child_pid);
 
-               if (!is_8xx)
-                       test_sethwdebug_range_aligned(child_pid);
-               if (dawr && !is_8xx) {
+               test_sethwdebug_range_aligned(child_pid);
+               if (dawr || is_8xx) {
                        test_sethwdebug_range_unaligned(child_pid);
                        test_sethwdebug_range_unaligned_dar(child_pid);
                        test_sethwdebug_dawr_max_range(child_pid);
index e98c367..d34fe06 100644 (file)
@@ -54,7 +54,7 @@ class SubPlugin(TdcPlugin):
             shell=True,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
-            env=ENVIR)
+            env=os.environ.copy())
         (rawout, serr) = proc.communicate()
 
         if proc.returncode != 0 and len(serr) > 0:
index 2e361ce..98a20fa 100644 (file)
@@ -6,6 +6,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -25,6 +28,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -44,6 +50,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -63,6 +72,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -82,6 +94,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],