Merge tag 'folio-5.18d' of git://git.infradead.org/users/willy/pagecache
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Apr 2022 20:50:50 +0000 (13:50 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Apr 2022 20:50:50 +0000 (13:50 -0700)
Pull more filesystem folio updates from Matthew Wilcox:
 "A mixture of odd changes that didn't quite make it into the original
  pull and fixes for things that did. Also the readpages changes had to
  wait for the NFS tree to be pulled first.

   - Remove ->readpages infrastructure

   - Remove AOP_FLAG_CONT_EXPAND

   - Move read_descriptor_t to networking code

   - Pass the iocb to generic_perform_write

   - Minor updates to iomap, btrfs, ext4, f2fs, ntfs"

* tag 'folio-5.18d' of git://git.infradead.org/users/willy/pagecache:
  btrfs: Remove a use of PAGE_SIZE in btrfs_invalidate_folio()
  ntfs: Correct mark_ntfs_record_dirty() folio conversion
  f2fs: Get the superblock from the mapping instead of the page
  f2fs: Correct f2fs_dirty_data_folio() conversion
  ext4: Correct ext4_journalled_dirty_folio() conversion
  filemap: Remove AOP_FLAG_CONT_EXPAND
  fs: Pass an iocb to generic_perform_write()
  fs, net: Move read_descriptor_t to net.h
  fs: Remove read_actor_t
  iomap: Simplify is_partially_uptodate a little
  readahead: Update comments
  mm: remove the skip_page argument to read_pages
  mm: remove the pages argument to read_pages
  fs: Remove ->readpages address space operation
  readahead: Remove read_cache_pages()

893 files changed:
.mailmap
Documentation/ABI/testing/sysfs-bus-nvdimm
Documentation/admin-guide/kernel-parameters.txt
Documentation/bpf/bpf_devel_QA.rst
Documentation/core-api/xarray.rst
Documentation/dev-tools/kunit/architecture.rst
Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
Documentation/devicetree/bindings/arm/idle-states.yaml [deleted file]
Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
Documentation/devicetree/bindings/arm/psci.yaml
Documentation/devicetree/bindings/clock/apple,nco.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
Documentation/devicetree/bindings/clock/cirrus,cs2000-cp.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/cs2000-cp.txt [deleted file]
Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
Documentation/devicetree/bindings/clock/imx1-clock.yaml
Documentation/devicetree/bindings/clock/imx21-clock.yaml
Documentation/devicetree/bindings/clock/imx23-clock.yaml
Documentation/devicetree/bindings/clock/imx25-clock.yaml
Documentation/devicetree/bindings/clock/imx27-clock.yaml
Documentation/devicetree/bindings/clock/imx28-clock.yaml
Documentation/devicetree/bindings/clock/imx31-clock.yaml
Documentation/devicetree/bindings/clock/imx35-clock.yaml
Documentation/devicetree/bindings/clock/imx7ulp-pcc-clock.yaml
Documentation/devicetree/bindings/clock/imx7ulp-scg-clock.yaml
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml
Documentation/devicetree/bindings/clock/imx93-clock.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/imxrt1050-clock.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/nvidia,tegra124-car.yaml
Documentation/devicetree/bindings/clock/nvidia,tegra20-car.yaml
Documentation/devicetree/bindings/clock/qcom,a7pll.yaml
Documentation/devicetree/bindings/clock/qcom,camcc.txt [deleted file]
Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,dispcc-sm6350.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
Documentation/devicetree/bindings/clock/qcom,gcc-ipq8064.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,gcc.yaml
Documentation/devicetree/bindings/clock/qcom,gpucc.yaml
Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
Documentation/devicetree/bindings/clock/qcom,qcm2290-dispcc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
Documentation/devicetree/bindings/clock/qcom,sdm845-camcc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,9series.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml
Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
Documentation/devicetree/bindings/clock/starfive,jh7100-audclk.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/cpu/idle-states.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt [deleted file]
Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
Documentation/devicetree/bindings/hwlock/ti,omap-hwspinlock.yaml
Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
Documentation/devicetree/bindings/net/qcom,ethqos.txt
Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml
Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
Documentation/devicetree/bindings/pwm/imx-pwm.yaml
Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml
Documentation/devicetree/bindings/pwm/intel,keembay-pwm.yaml
Documentation/devicetree/bindings/pwm/intel,lgm-pwm.yaml
Documentation/devicetree/bindings/pwm/iqs620a-pwm.yaml
Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml
Documentation/devicetree/bindings/pwm/pwm-samsung.yaml
Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
Documentation/devicetree/bindings/pwm/pwm-tiecap.yaml
Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.yaml
Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
Documentation/devicetree/bindings/pwm/toshiba,pwm-visconti.yaml
Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
Documentation/devicetree/bindings/remoteproc/qcom,hexagon-v56.txt [deleted file]
Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt [deleted file]
Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/serial/sifive-serial.yaml
Documentation/devicetree/bindings/timer/ingenic,tcu.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
Documentation/driver-api/nvdimm/nvdimm.rst
Documentation/filesystems/netfs_library.rst
Documentation/kbuild/kbuild.rst
Documentation/kbuild/llvm.rst
Documentation/kbuild/makefiles.rst
Documentation/locking/locktypes.rst
Documentation/maintainer/index.rst
Documentation/maintainer/messy-diffstat.rst [new file with mode: 0644]
Documentation/networking/index.rst
Documentation/networking/netdev-FAQ.rst [deleted file]
Documentation/process/maintainer-handbooks.rst
Documentation/process/maintainer-netdev.rst [new file with mode: 0644]
Documentation/riscv/index.rst
Documentation/sphinx/kernel_abi.py
Documentation/sphinx/kernel_feat.py
Documentation/sphinx/kernel_include.py
Documentation/sphinx/kerneldoc.py
Documentation/sphinx/kfigure.py
Documentation/sphinx/requirements.txt
Documentation/staging/remoteproc.rst
Documentation/virt/uml/user_mode_linux_howto_v2.rst
Documentation/vm/page_owner.rst
Documentation/vm/unevictable-lru.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/syscalls/Makefile
arch/arm/boot/dts/spear1340.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/crypto/Kconfig
arch/arm/mach-omap2/omap-secure.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mm.h
arch/arm/tools/Makefile
arch/arm64/boot/dts/amd/Makefile
arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
arch/arm64/boot/dts/amd/amd-overdrive.dts [deleted file]
arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
arch/arm64/boot/dts/amd/husky.dts [deleted file]
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
arch/ia64/kernel/syscalls/Makefile
arch/m68k/kernel/syscalls/Makefile
arch/microblaze/boot/Makefile
arch/microblaze/boot/dts/Makefile
arch/microblaze/kernel/syscalls/Makefile
arch/mips/kernel/syscalls/Makefile
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/pdc.h
arch/parisc/include/asm/pdcpat.h
arch/parisc/include/asm/processor.h
arch/parisc/include/asm/smp.h
arch/parisc/include/asm/special_insns.h
arch/parisc/include/asm/topology.h
arch/parisc/kernel/Makefile
arch/parisc/kernel/cache.c
arch/parisc/kernel/firmware.c
arch/parisc/kernel/head.S
arch/parisc/kernel/irq.c
arch/parisc/kernel/pacache.S
arch/parisc/kernel/patch.c
arch/parisc/kernel/process.c
arch/parisc/kernel/processor.c
arch/parisc/kernel/smp.c
arch/parisc/kernel/syscalls/Makefile
arch/parisc/kernel/time.c
arch/parisc/kernel/topology.c
arch/powerpc/include/asm/device.h
arch/powerpc/kernel/syscalls/Makefile
arch/powerpc/platforms/pseries/papr_scm.c
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
arch/riscv/boot/dts/sifive/fu540-c000.dtsi
arch/riscv/boot/dts/sifive/fu740-c000.dtsi
arch/riscv/configs/defconfig
arch/riscv/configs/nommu_k210_defconfig
arch/riscv/configs/nommu_k210_sdcard_defconfig
arch/riscv/configs/nommu_virt_defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/cpuidle.h [new file with mode: 0644]
arch/riscv/include/asm/current.h
arch/riscv/include/asm/module.lds.h
arch/riscv/include/asm/suspend.h [new file with mode: 0644]
arch/riscv/include/asm/thread_info.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/cpu.c
arch/riscv/kernel/cpu_ops_sbi.c
arch/riscv/kernel/head.S
arch/riscv/kernel/module.c
arch/riscv/kernel/perf_callchain.c
arch/riscv/kernel/process.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/suspend.c [new file with mode: 0644]
arch/riscv/kernel/suspend_entry.S [new file with mode: 0644]
arch/s390/Kconfig
arch/s390/include/asm/alternative-asm.h
arch/s390/include/asm/alternative.h
arch/s390/include/asm/ap.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/spinlock.h
arch/s390/include/asm/syscall_wrapper.h
arch/s390/include/asm/unwind.h
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/os_info.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls/Makefile
arch/s390/kernel/traps.c
arch/s390/kernel/unwind_bc.c
arch/s390/lib/spinlock.c
arch/s390/lib/test_unwind.c
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.h
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_event.c
arch/sh/kernel/syscalls/Makefile
arch/sparc/kernel/syscalls/Makefile
arch/um/Makefile
arch/um/drivers/mconsole_kern.c
arch/um/drivers/port_user.c
arch/um/drivers/ubd_kern.c
arch/um/drivers/vector_kern.c
arch/um/drivers/vector_kern.h
arch/um/drivers/vector_user.c
arch/um/drivers/vector_user.h
arch/um/include/asm/xor.h
arch/um/include/shared/os.h
arch/um/kernel/dtb.c
arch/um/os-Linux/file.c
arch/um/os-Linux/helper.c
arch/um/os-Linux/time.c
arch/x86/Kconfig
arch/x86/crypto/chacha-avx512vl-x86_64.S
arch/x86/crypto/poly1305-x86_64-cryptogams.pl
arch/x86/crypto/sm3-avx-asm_64.S
arch/x86/entry/syscalls/Makefile
arch/x86/include/asm/unwind.h
arch/x86/kernel/Makefile
arch/x86/kernel/kprobes/common.h
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/rethook.c [new file with mode: 0644]
arch/x86/kernel/unwind_orc.c
arch/x86/um/shared/sysdep/syscalls_64.h
arch/x86/um/syscalls_64.c
arch/xtensa/kernel/syscalls/Makefile
certs/Makefile
certs/system_certificates.S
drivers/acpi/acpi_ipmi.c
drivers/acpi/apei/apei-base.c
drivers/acpi/cppc_acpi.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/nfit.h
drivers/acpi/tables.c
drivers/auxdisplay/lcd2s.c
drivers/char/Kconfig
drivers/char/random.c
drivers/clk/.kunitconfig [new file with mode: 0644]
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/actions/owl-s500.c
drivers/clk/actions/owl-s700.c
drivers/clk/actions/owl-s900.c
drivers/clk/at91/at91rm9200.c
drivers/clk/at91/at91sam9260.c
drivers/clk/at91/at91sam9g45.c
drivers/clk/at91/at91sam9n12.c
drivers/clk/at91/at91sam9rl.c
drivers/clk/at91/at91sam9x5.c
drivers/clk/at91/clk-master.c
drivers/clk/at91/dt-compat.c
drivers/clk/at91/pmc.h
drivers/clk/at91/sam9x60.c
drivers/clk/at91/sama5d2.c
drivers/clk/at91/sama5d3.c
drivers/clk/at91/sama5d4.c
drivers/clk/at91/sama7g5.c
drivers/clk/at91/sckc.c
drivers/clk/axis/clk-artpec6.c
drivers/clk/bcm/clk-bcm2835.c
drivers/clk/bcm/clk-iproc.h
drivers/clk/bcm/clk-kona-setup.c
drivers/clk/bcm/clk-raspberrypi.c
drivers/clk/clk-apple-nco.c [new file with mode: 0644]
drivers/clk/clk-clps711x.c
drivers/clk/clk-cs2000-cp.c
drivers/clk/clk-fixed-factor.c
drivers/clk/clk-fractional-divider.c
drivers/clk/clk-gate_test.c [new file with mode: 0644]
drivers/clk/clk-mux.c
drivers/clk/clk-oxnas.c
drivers/clk/clk-renesas-pcie.c [new file with mode: 0644]
drivers/clk/clk-si5341.c
drivers/clk/clk-stm32mp1.c
drivers/clk/clk.c
drivers/clk/clk_test.c [new file with mode: 0644]
drivers/clk/hisilicon/clk-hi3559a.c
drivers/clk/hisilicon/clk.c
drivers/clk/imx/Kconfig
drivers/clk/imx/Makefile
drivers/clk/imx/clk-composite-93.c [new file with mode: 0644]
drivers/clk/imx/clk-fracn-gppll.c [new file with mode: 0644]
drivers/clk/imx/clk-imx7d.c
drivers/clk/imx/clk-imx8dxl-rsrc.c [new file with mode: 0644]
drivers/clk/imx/clk-imx8mm.c
drivers/clk/imx/clk-imx8mn.c
drivers/clk/imx/clk-imx8mp.c
drivers/clk/imx/clk-imx8qxp-lpcg.c
drivers/clk/imx/clk-imx8qxp.c
drivers/clk/imx/clk-imx93.c [new file with mode: 0644]
drivers/clk/imx/clk-imxrt1050.c [new file with mode: 0644]
drivers/clk/imx/clk-pll14xx.c
drivers/clk/imx/clk-scu.h
drivers/clk/imx/clk-sscg-pll.c
drivers/clk/imx/clk.h
drivers/clk/loongson1/clk-loongson1c.c
drivers/clk/mediatek/clk-apmixed.c
drivers/clk/mediatek/clk-cpumux.c
drivers/clk/mediatek/clk-cpumux.h
drivers/clk/mediatek/clk-gate.c
drivers/clk/mediatek/clk-gate.h
drivers/clk/mediatek/clk-mt2701.c
drivers/clk/mediatek/clk-mt2712.c
drivers/clk/mediatek/clk-mt6765.c
drivers/clk/mediatek/clk-mt6779.c
drivers/clk/mediatek/clk-mt6797.c
drivers/clk/mediatek/clk-mt7622.c
drivers/clk/mediatek/clk-mt7629.c
drivers/clk/mediatek/clk-mt7986-apmixed.c
drivers/clk/mediatek/clk-mt8135.c
drivers/clk/mediatek/clk-mt8167.c
drivers/clk/mediatek/clk-mt8173.c
drivers/clk/mediatek/clk-mt8183.c
drivers/clk/mediatek/clk-mt8192.c
drivers/clk/mediatek/clk-mt8195-apmixedsys.c
drivers/clk/mediatek/clk-mt8195-apusys_pll.c
drivers/clk/mediatek/clk-mt8195-cam.c
drivers/clk/mediatek/clk-mt8195-ccu.c
drivers/clk/mediatek/clk-mt8195-img.c
drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
drivers/clk/mediatek/clk-mt8195-infra_ao.c
drivers/clk/mediatek/clk-mt8195-ipe.c
drivers/clk/mediatek/clk-mt8195-mfg.c
drivers/clk/mediatek/clk-mt8195-peri_ao.c
drivers/clk/mediatek/clk-mt8195-scp_adsp.c
drivers/clk/mediatek/clk-mt8195-topckgen.c
drivers/clk/mediatek/clk-mt8195-vdec.c
drivers/clk/mediatek/clk-mt8195-vdo0.c
drivers/clk/mediatek/clk-mt8195-vdo1.c
drivers/clk/mediatek/clk-mt8195-venc.c
drivers/clk/mediatek/clk-mt8195-vpp0.c
drivers/clk/mediatek/clk-mt8195-vpp1.c
drivers/clk/mediatek/clk-mt8195-wpe.c
drivers/clk/mediatek/clk-mt8516.c
drivers/clk/mediatek/clk-mtk.c
drivers/clk/mediatek/clk-mtk.h
drivers/clk/mediatek/clk-mux.c
drivers/clk/mediatek/clk-mux.h
drivers/clk/mediatek/clk-pll.c
drivers/clk/mediatek/clk-pll.h [new file with mode: 0644]
drivers/clk/mediatek/reset.c
drivers/clk/meson/meson8b.c
drivers/clk/microchip/Kconfig [new file with mode: 0644]
drivers/clk/microchip/Makefile
drivers/clk/microchip/clk-mpfs.c [new file with mode: 0644]
drivers/clk/mmp/clk-of-mmp2.c
drivers/clk/mmp/pwr-island.c
drivers/clk/mvebu/armada-37xx-periph.c
drivers/clk/nxp/clk-lpc18xx-cgu.c
drivers/clk/pistachio/clk-pistachio.c
drivers/clk/qcom/Kconfig
drivers/clk/qcom/Makefile
drivers/clk/qcom/camcc-sc7180.c
drivers/clk/qcom/camcc-sdm845.c
drivers/clk/qcom/clk-rcg.c
drivers/clk/qcom/clk-rcg.h
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-rpmh.c
drivers/clk/qcom/clk-smd-rpm.c
drivers/clk/qcom/dispcc-qcm2290.c [new file with mode: 0644]
drivers/clk/qcom/dispcc-sm6125.c [new file with mode: 0644]
drivers/clk/qcom/dispcc-sm6350.c [new file with mode: 0644]
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/qcom/gcc-ipq8074.c
drivers/clk/qcom/gcc-msm8994.c
drivers/clk/qcom/gcc-msm8996.c
drivers/clk/qcom/gcc-sm6125.c
drivers/clk/qcom/gcc-sm8150.c
drivers/clk/qcom/gpucc-sdm660.c
drivers/clk/qcom/gpucc-sm6350.c [new file with mode: 0644]
drivers/clk/qcom/kpss-xcc.c
drivers/clk/qcom/mmcc-msm8974.c
drivers/clk/qcom/videocc-sc7180.c
drivers/clk/renesas/Kconfig
drivers/clk/renesas/Makefile
drivers/clk/renesas/r8a77990-cpg-mssr.c
drivers/clk/renesas/r8a77995-cpg-mssr.c
drivers/clk/renesas/r8a779a0-cpg-mssr.c
drivers/clk/renesas/r8a779f0-cpg-mssr.c
drivers/clk/renesas/r9a07g044-cpg.c
drivers/clk/renesas/rzg2l-cpg.c
drivers/clk/renesas/rzg2l-cpg.h
drivers/clk/rockchip/clk-rk3568.c
drivers/clk/rockchip/clk.c
drivers/clk/sifive/Makefile
drivers/clk/sifive/fu540-prci.c [deleted file]
drivers/clk/sifive/fu540-prci.h
drivers/clk/sifive/fu740-prci.c [deleted file]
drivers/clk/sifive/fu740-prci.h
drivers/clk/sifive/sifive-prci.c
drivers/clk/socfpga/clk-gate-s10.c
drivers/clk/socfpga/clk-periph-s10.c
drivers/clk/socfpga/clk-pll-s10.c
drivers/clk/socfpga/clk-s10.c
drivers/clk/starfive/Kconfig
drivers/clk/starfive/Makefile
drivers/clk/starfive/clk-starfive-jh7100-audio.c [new file with mode: 0644]
drivers/clk/starfive/clk-starfive-jh7100.c
drivers/clk/starfive/clk-starfive-jh7100.h [new file with mode: 0644]
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/Makefile
drivers/clk/sunxi-ng/ccu-sun6i-rtc.c [new file with mode: 0644]
drivers/clk/sunxi-ng/ccu-sun6i-rtc.h [new file with mode: 0644]
drivers/clk/sunxi-ng/ccu_common.h
drivers/clk/sunxi-ng/ccu_mux.c
drivers/clk/tegra/clk-tegra124-emc.c
drivers/clk/ti/Makefile
drivers/clk/ti/apll.c
drivers/clk/ti/autoidle.c
drivers/clk/ti/clk-33xx-compat.c [deleted file]
drivers/clk/ti/clk-33xx.c
drivers/clk/ti/clk-43xx-compat.c [deleted file]
drivers/clk/ti/clk-43xx.c
drivers/clk/ti/clk-7xx-compat.c [deleted file]
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/clk.c
drivers/clk/ti/clkctrl.c
drivers/clk/ti/clock.h
drivers/clk/ti/clockdomain.c
drivers/clk/ti/composite.c
drivers/clk/ti/divider.c
drivers/clk/ti/dpll.c
drivers/clk/ti/fapll.c
drivers/clk/ti/fixed-factor.c
drivers/clk/ti/gate.c
drivers/clk/ti/interface.c
drivers/clk/ti/mux.c
drivers/clk/uniphier/clk-uniphier-fixed-rate.c
drivers/clk/visconti/clkc-tmpv770x.c
drivers/clk/visconti/clkc.c
drivers/clk/visconti/clkc.h
drivers/clk/zynq/clkc.c
drivers/clk/zynqmp/clk-gate-zynqmp.c
drivers/clk/zynqmp/clk-mux-zynqmp.c
drivers/clk/zynqmp/divider.c
drivers/clk/zynqmp/pll.c
drivers/cpuidle/Kconfig
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/Kconfig.riscv [new file with mode: 0644]
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-psci-domain.c
drivers/cpuidle/cpuidle-psci.h
drivers/cpuidle/cpuidle-riscv-sbi.c [new file with mode: 0644]
drivers/cpuidle/dt_idle_genpd.c [new file with mode: 0644]
drivers/cpuidle/dt_idle_genpd.h [new file with mode: 0644]
drivers/crypto/stm32/stm32-crc32.c
drivers/crypto/virtio/Kconfig
drivers/crypto/virtio/Makefile
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c [new file with mode: 0644]
drivers/crypto/virtio/virtio_crypto_algs.c [deleted file]
drivers/crypto/virtio/virtio_crypto_common.h
drivers/crypto/virtio/virtio_crypto_core.c
drivers/crypto/virtio/virtio_crypto_mgr.c
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c [new file with mode: 0644]
drivers/dma/altera-msgdma.c
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/dw-axi-dmac/dw-axi-dmac.h
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
drivers/dma/hisi_dma.c
drivers/dma/idxd/device.c
drivers/dma/idxd/init.c
drivers/dma/imx-sdma.c
drivers/dma/ioat/init.c
drivers/dma/ppc4xx/adma.c
drivers/dma/ptdma/ptdma-dmaengine.c
drivers/dma/qcom/hidma.c
drivers/dma/sh/Kconfig
drivers/dma/sh/shdma-base.c
drivers/dma/stm32-dma.c
drivers/dma/ti/Makefile
drivers/dma/ti/cppi41.c
drivers/dma/ti/edma.c
drivers/dma/ti/k3-psil-am62.c [new file with mode: 0644]
drivers/dma/ti/k3-psil-priv.h
drivers/dma/ti/k3-psil.c
drivers/dma/ti/k3-udma.c
drivers/dma/ti/omap-dma.c
drivers/gpio/gpio-ts4900.c
drivers/gpio/gpio-ts5500.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/hid-google-hammer.c
drivers/hid/hid-vivaldi-common.c [new file with mode: 0644]
drivers/hid/hid-vivaldi-common.h [new file with mode: 0644]
drivers/hid/hid-vivaldi.c
drivers/hwspinlock/sprd_hwspinlock.c
drivers/hwspinlock/stm32_hwspinlock.c
drivers/i3c/master.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/input.c
drivers/input/joystick/adi.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/Makefile
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cros_ec_keyb.c
drivers/input/keyboard/mt6779-keypad.c [new file with mode: 0644]
drivers/input/keyboard/mtk-pmic-keys.c
drivers/input/misc/da9063_onkey.c
drivers/input/mouse/synaptics.c
drivers/input/serio/ps2-gpio.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/goodix.h
drivers/input/touchscreen/imagis.c [new file with mode: 0644]
drivers/input/touchscreen/iqs5xx.c
drivers/input/touchscreen/stmfts.c
drivers/input/touchscreen/tsc200x-core.c
drivers/input/vivaldi-fmap.c [new file with mode: 0644]
drivers/mtd/ubi/build.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/vmt.c
drivers/net/can/m_can/m_can.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/mcba_usb.c
drivers/net/can/usb/usb_8dev.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microchip/sparx5/Kconfig
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/virtio_net.c
drivers/net/vxlan/vxlan_vnifilter.c
drivers/net/wireguard/queueing.c
drivers/net/wireguard/socket.c
drivers/nvdimm/Kconfig
drivers/nvdimm/Makefile
drivers/nvdimm/blk.c [deleted file]
drivers/nvdimm/bus.c
drivers/nvdimm/dimm_devs.c
drivers/nvdimm/label.c
drivers/nvdimm/label.h
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/nd-core.h
drivers/nvdimm/nd.h
drivers/nvdimm/nd_perf.c [new file with mode: 0644]
drivers/nvdimm/region.c
drivers/nvdimm/region_devs.c
drivers/parisc/dino.c
drivers/parisc/gsc.c
drivers/parisc/gsc.h
drivers/parisc/lasi.c
drivers/parisc/wax.c
drivers/ptp/ptp_ocp.c
drivers/pwm/pwm-atmel.c
drivers/pwm/pwm-bcm-kona.c
drivers/pwm/pwm-brcmstb.c
drivers/pwm/pwm-img.c
drivers/pwm/pwm-imx1.c
drivers/pwm/pwm-jz4740.c
drivers/pwm/pwm-lpc18xx-sct.c
drivers/pwm/pwm-mediatek.c
drivers/pwm/pwm-meson.c
drivers/pwm/pwm-pca9685.c
drivers/pwm/pwm-pxa.c
drivers/pwm/pwm-raspberrypi-poe.c
drivers/pwm/pwm-rcar.c
drivers/pwm/pwm-stmpe.c
drivers/pwm/pwm-sun4i.c
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/pwm-vt8500.c
drivers/regulator/rt4831-regulator.c
drivers/remoteproc/mtk_common.h
drivers/remoteproc/mtk_scp.c
drivers/remoteproc/qcom_q6v5.c
drivers/remoteproc/qcom_q6v5.h
drivers/remoteproc/qcom_q6v5_adsp.c
drivers/remoteproc/qcom_q6v5_mss.c
drivers/remoteproc/qcom_wcnss.c
drivers/remoteproc/remoteproc_cdev.c
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/remoteproc_debugfs.c
drivers/remoteproc/remoteproc_internal.h
drivers/remoteproc/remoteproc_sysfs.c
drivers/remoteproc/ti_k3_dsp_remoteproc.c
drivers/remoteproc/ti_k3_r5_remoteproc.c
drivers/remoteproc/wkup_m3_rproc.c
drivers/rpmsg/Kconfig
drivers/rpmsg/Makefile
drivers/rpmsg/qcom_glink_native.c
drivers/rpmsg/qcom_smd.c
drivers/rpmsg/rpmsg_char.c
drivers/rpmsg/rpmsg_char.h [new file with mode: 0644]
drivers/rpmsg/rpmsg_core.c
drivers/rpmsg/rpmsg_ctrl.c [new file with mode: 0644]
drivers/rpmsg/rpmsg_internal.h
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ds1685.c
drivers/rtc/rtc-efi.c
drivers/rtc/rtc-gamecube.c
drivers/rtc/rtc-hym8563.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-mc146818-lib.c
drivers/rtc/rtc-mpc5121.c
drivers/rtc/rtc-opal.c
drivers/rtc/rtc-optee.c [new file with mode: 0644]
drivers/rtc/rtc-pcf2123.c
drivers/rtc/rtc-pcf2127.c
drivers/rtc/rtc-pcf85063.c
drivers/rtc/rtc-pcf8523.c
drivers/rtc/rtc-pcf8563.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-pm8xxx.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-sun6i.c
drivers/rtc/rtc-wm8350.c
drivers/rtc/rtc-xgene.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp_con.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/tape_34xx.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/eadm_sch.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_ep11misc.c
drivers/vdpa/ifcvf/ifcvf_base.c
drivers/vdpa/ifcvf/ifcvf_base.h
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vhost/iotlb.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/virt/vmgenid.c
drivers/virtio/Kconfig
drivers/virtio/virtio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/virtio/virtio_ring.c
drivers/watchdog/Kconfig
drivers/watchdog/aspeed_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/ixp4xx_wdt.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/renesas_wdt.c
drivers/watchdog/rti_wdt.c
drivers/watchdog/sp5100_tco.c
drivers/watchdog/sp5100_tco.h
drivers/watchdog/watchdog_dev.c
fs/9p/cache.c
fs/9p/v9fs.c
fs/9p/v9fs.h
fs/9p/vfs_addr.c
fs/9p/vfs_inode.c
fs/afs/dynroot.c
fs/afs/file.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/super.c
fs/afs/write.c
fs/cachefiles/io.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/cache.h
fs/ceph/inode.c
fs/ceph/super.h
fs/cifs/cifsglob.h
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/fscache/internal.h
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/lock_dlm.c
fs/gfs2/rgrp.c
fs/gfs2/rgrp.h
fs/gfs2/super.c
fs/jffs2/build.c
fs/jffs2/fs.c
fs/jffs2/jffs2_fs_i.h
fs/jffs2/scan.c
fs/netfs/Makefile
fs/netfs/buffered_read.c [new file with mode: 0644]
fs/netfs/internal.h
fs/netfs/io.c [new file with mode: 0644]
fs/netfs/main.c [new file with mode: 0644]
fs/netfs/objects.c [new file with mode: 0644]
fs/netfs/read_helper.c [deleted file]
fs/netfs/stats.c
fs/nfs/fscache.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/dat.c
fs/nilfs2/gcinode.c
fs/nilfs2/inode.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/segment.c
fs/nilfs2/super.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/io.c
fs/ubifs/ioctl.c
fs/ubifs/journal.c
fs/ubifs/ubifs.h
fs/unicode/Makefile
include/dt-bindings/clock/am3.h
include/dt-bindings/clock/am4.h
include/dt-bindings/clock/at91.h
include/dt-bindings/clock/cirrus,cs2000-cp.h [new file with mode: 0644]
include/dt-bindings/clock/dra7.h
include/dt-bindings/clock/imx93-clock.h [new file with mode: 0644]
include/dt-bindings/clock/imxrt1050-clock.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,dispcc-qcm2290.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,dispcc-sm6125.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,dispcc-sm6350.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,gcc-ipq806x.h
include/dt-bindings/clock/qcom,gcc-sm8150.h
include/dt-bindings/clock/qcom,gpucc-sm6350.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,rpmcc.h
include/dt-bindings/clock/sifive-fu540-prci.h
include/dt-bindings/clock/sifive-fu740-prci.h
include/dt-bindings/clock/starfive-jh7100-audio.h [new file with mode: 0644]
include/dt-bindings/clock/sun6i-rtc.h [new file with mode: 0644]
include/dt-bindings/reset/qcom,gcc-ipq806x.h
include/linux/balloon_compaction.h
include/linux/clk-provider.h
include/linux/clk.h
include/linux/clk/sunxi-ng.h
include/linux/cma.h
include/linux/fscache.h
include/linux/gfp.h
include/linux/gpio/driver.h
include/linux/i3c/master.h
include/linux/input.h
include/linux/input/vivaldi-fmap.h [new file with mode: 0644]
include/linux/kprobes.h
include/linux/libnvdimm.h
include/linux/mc146818rtc.h
include/linux/nd.h
include/linux/netfs.h
include/linux/remoteproc.h
include/linux/rtc.h
include/linux/rtc/ds1685.h
include/linux/soc/qcom/smd-rpm.h
include/linux/vdpa.h
include/linux/xarray.h
include/sound/pcm.h
include/trace/events/cachefiles.h
include/trace/events/netfs.h
include/trace/events/rxrpc.h
include/uapi/linux/ndctl.h
include/uapi/linux/rpmsg.h
include/uapi/linux/rtc.h
include/uapi/linux/vhost.h
include/uapi/linux/virtio_config.h
include/uapi/linux/virtio_crypto.h
init/Kconfig
kernel/Makefile
kernel/bpf/btf.c
kernel/kprobes.c
kernel/trace/fprobe.c
kernel/trace/trace_kprobe.c
lib/logic_iomem.c
lib/test_kmod.c
lib/test_xarray.c
lib/xarray.c
mm/balloon_compaction.c
mm/damon/core.c
mm/gup.c
mm/internal.h
mm/kfence/core.c
mm/kfence/kfence.h
mm/kmemleak.c
mm/madvise.c
mm/memory.c
mm/migrate.c
mm/mlock.c
mm/page_alloc.c
mm/rmap.c
mm/swap.c
net/ax25/af_ax25.c
net/can/isotp.c
net/openvswitch/actions.c
net/openvswitch/flow_netlink.c
net/rxrpc/ar-internal.h
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/server_key.c
net/xdp/xsk_buff_pool.c
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.lib
scripts/basic/fixdep.c
scripts/get_abi.pl
scripts/get_feat.pl
scripts/kallsyms.c
scripts/kconfig/confdata.c
scripts/kernel-doc
security/Kconfig
sound/core/pcm.c
sound/core/pcm_lib.c
sound/core/pcm_native.c
sound/isa/cs423x/cs4236.c
sound/pci/hda/patch_cs8409-tables.c
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_cs8409.h
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/mt6358.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/sof/intel/Kconfig
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/feature.c
tools/bpf/bpftool/gen.c
tools/build/Makefile
tools/counter/Makefile
tools/gpio/Makefile
tools/hv/Makefile
tools/iio/Makefile
tools/include/uapi/linux/bpf.h
tools/lib/api/Makefile
tools/lib/bpf/Makefile
tools/lib/perf/Makefile
tools/lib/subcmd/Makefile
tools/objtool/Makefile
tools/pci/Makefile
tools/perf/Makefile.perf
tools/power/x86/intel-speed-select/Makefile
tools/scripts/Makefile.include
tools/scripts/utilities.mak
tools/spi/Makefile
tools/testing/nvdimm/Kbuild
tools/testing/nvdimm/config_check.c
tools/testing/nvdimm/test/ndtest.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
tools/testing/selftests/bpf/test_lpm_map.c
tools/testing/selftests/lib.mk
tools/testing/selftests/wireguard/qemu/init.c
tools/tracing/rtla/Makefile
tools/usb/Makefile
tools/virtio/Makefile
tools/virtio/linux/dma-mapping.h
tools/vm/page_owner_sort.c
usr/Makefile
usr/include/Makefile

index 8fd9b3c..b9d3582 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -213,6 +213,7 @@ Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
 Kenneth W Chen <kenneth.w.chen@intel.com>
+Kirill Tkhai <kirill.tkhai@openvz.org> <ktkhai@virtuozzo.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
index bff84a1..1c1f5ac 100644 (file)
@@ -6,3 +6,38 @@ Description:
 
 The libnvdimm sub-system implements a common sysfs interface for
 platform nvdimm resources. See Documentation/driver-api/nvdimm/.
+
+What:           /sys/bus/event_source/devices/nmemX/format
+Date:           February 2022
+KernelVersion:  5.18
+Contact:        Kajol Jain <kjain@linux.ibm.com>
+Description:   (RO) Attribute group to describe the magic bits
+               that go into perf_event_attr.config for a particular pmu.
+               (See ABI/testing/sysfs-bus-event_source-devices-format).
+
+               Each attribute under this group defines a bit range of the
+               perf_event_attr.config. Supported attribute is listed
+               below::
+                 event  = "config:0-4"  - event ID
+
+               For example::
+                       ctl_res_cnt = "event=0x1"
+
+What:           /sys/bus/event_source/devices/nmemX/events
+Date:           February 2022
+KernelVersion:  5.18
+Contact:        Kajol Jain <kjain@linux.ibm.com>
+Description:   (RO) Attribute group to describe performance monitoring events
+                for the nvdimm memory device. Each attribute in this group
+                describes a single performance monitoring event supported by
+                this nvdimm pmu.  The name of the file is the name of the event.
+                (See ABI/testing/sysfs-bus-event_source-devices-events). A
+                listing of the events supported by a given nvdimm provider type
+                can be found in Documentation/driver-api/nvdimm/$provider.
+
+What:          /sys/bus/event_source/devices/nmemX/cpumask
+Date:          February 2022
+KernelVersion:  5.18
+Contact:        Kajol Jain <kjain@linux.ibm.com>
+Description:   (RO) This sysfs file exposes the cpumask which is designated to
+               to retrieve nvdimm pmu event counter data.
index b7ccaa2..3f1cc5e 100644 (file)
                        fully seed the kernel's CRNG. Default is controlled
                        by CONFIG_RANDOM_TRUST_CPU.
 
+       random.trust_bootloader={on,off}
+                       [KNL] Enable or disable trusting the use of a
+                       seed passed by the bootloader (if available) to
+                       fully seed the kernel's CRNG. Default is controlled
+                       by CONFIG_RANDOM_TRUST_BOOTLOADER.
+
        randomize_kstack_offset=
                        [KNL] Enable or disable kernel stack offset
                        randomization, which provides roughly 5 bits of
index 253496a..761474b 100644 (file)
@@ -658,7 +658,7 @@ when:
 
 .. Links
 .. _Documentation/process/: https://www.kernel.org/doc/html/latest/process/
-.. _netdev-FAQ: ../networking/netdev-FAQ.rst
+.. _netdev-FAQ: Documentation/process/maintainer-netdev.rst
 .. _selftests:
    https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/
 .. _Documentation/dev-tools/kselftest.rst:
index a137a0e..77e0ece 100644 (file)
@@ -315,11 +315,15 @@ indeed the normal API is implemented in terms of the advanced API.  The
 advanced API is only available to modules with a GPL-compatible license.
 
 The advanced API is based around the xa_state.  This is an opaque data
-structure which you declare on the stack using the XA_STATE()
-macro.  This macro initialises the xa_state ready to start walking
-around the XArray.  It is used as a cursor to maintain the position
-in the XArray and let you compose various operations together without
-having to restart from the top every time.
+structure which you declare on the stack using the XA_STATE() macro.
+This macro initialises the xa_state ready to start walking around the
+XArray.  It is used as a cursor to maintain the position in the XArray
+and let you compose various operations together without having to restart
+from the top every time.  The contents of the xa_state are protected by
+the rcu_read_lock() or the xas_lock().  If you need to drop whichever of
+those locks is protecting your state and tree, you must call xas_pause()
+so that future calls do not rely on the parts of the state which were
+left unprotected.
 
 The xa_state is also used to store errors.  You can call
 xas_error() to retrieve the error.  All operations check whether
index aa2cea8..ff9c85a 100644 (file)
@@ -26,10 +26,7 @@ The fundamental unit in KUnit is the test case. The KUnit test cases are
 grouped into KUnit suites. A KUnit test case is a function with type
 signature ``void (*)(struct kunit *test)``.
 These test case functions are wrapped in a struct called
-``struct kunit_case``. For code, see:
-
-.. kernel-doc:: include/kunit/test.h
-       :identifiers: kunit_case
+struct kunit_case.
 
 .. note:
        ``generate_params`` is optional for non-parameterized tests.
@@ -152,18 +149,12 @@ Parameterized Tests
 Each KUnit parameterized test is associated with a collection of
 parameters. The test is invoked multiple times, once for each parameter
 value and the parameter is stored in the ``param_value`` field.
-The test case includes a ``KUNIT_CASE_PARAM()`` macro that accepts a
+The test case includes a KUNIT_CASE_PARAM() macro that accepts a
 generator function.
 The generator function is passed the previous parameter and returns the next
 parameter. It also provides a macro to generate common-case generators based on
 arrays.
 
-For code, see:
-
-.. kernel-doc:: include/kunit/test.h
-       :identifiers: KUNIT_ARRAY_PARAM
-
-
 kunit_tool (Command Line Test Harness)
 ======================================
 
index fd00617..a87ec15 100644 (file)
@@ -86,6 +86,7 @@ This binding uses the common clock binding[1].
 
 Required properties:
 - compatible:          Should be one of:
+                         "fsl,imx8dxl-clk"
                          "fsl,imx8qm-clk"
                          "fsl,imx8qxp-clk"
                        followed by "fsl,scu-clk"
diff --git a/Documentation/devicetree/bindings/arm/idle-states.yaml b/Documentation/devicetree/bindings/arm/idle-states.yaml
deleted file mode 100644 (file)
index 4d381fa..0000000
+++ /dev/null
@@ -1,661 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/arm/idle-states.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: ARM idle states binding description
-
-maintainers:
-  - Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-
-description: |+
-  ==========================================
-  1 - Introduction
-  ==========================================
-
-  ARM systems contain HW capable of managing power consumption dynamically,
-  where cores can be put in different low-power states (ranging from simple wfi
-  to power gating) according to OS PM policies. The CPU states representing the
-  range of dynamic idle states that a processor can enter at run-time, can be
-  specified through device tree bindings representing the parameters required to
-  enter/exit specific idle states on a given processor.
-
-  According to the Server Base System Architecture document (SBSA, [3]), the
-  power states an ARM CPU can be put into are identified by the following list:
-
-  - Running
-  - Idle_standby
-  - Idle_retention
-  - Sleep
-  - Off
-
-  The power states described in the SBSA document define the basic CPU states on
-  top of which ARM platforms implement power management schemes that allow an OS
-  PM implementation to put the processor in different idle states (which include
-  states listed above; "off" state is not an idle state since it does not have
-  wake-up capabilities, hence it is not considered in this document).
-
-  Idle state parameters (e.g. entry latency) are platform specific and need to
-  be characterized with bindings that provide the required information to OS PM
-  code so that it can build the required tables and use them at runtime.
-
-  The device tree binding definition for ARM idle states is the subject of this
-  document.
-
-  ===========================================
-  2 - idle-states definitions
-  ===========================================
-
-  Idle states are characterized for a specific system through a set of
-  timing and energy related properties, that underline the HW behaviour
-  triggered upon idle states entry and exit.
-
-  The following diagram depicts the CPU execution phases and related timing
-  properties required to enter and exit an idle state:
-
-  ..__[EXEC]__|__[PREP]__|__[ENTRY]__|__[IDLE]__|__[EXIT]__|__[EXEC]__..
-              |          |           |          |          |
-
-              |<------ entry ------->|
-              |       latency        |
-                                                |<- exit ->|
-                                                |  latency |
-              |<-------- min-residency -------->|
-                         |<-------  wakeup-latency ------->|
-
-      Diagram 1: CPU idle state execution phases
-
-  EXEC:  Normal CPU execution.
-
-  PREP:  Preparation phase before committing the hardware to idle mode
-    like cache flushing. This is abortable on pending wake-up
-    event conditions. The abort latency is assumed to be negligible
-    (i.e. less than the ENTRY + EXIT duration). If aborted, CPU
-    goes back to EXEC. This phase is optional. If not abortable,
-    this should be included in the ENTRY phase instead.
-
-  ENTRY:  The hardware is committed to idle mode. This period must run
-    to completion up to IDLE before anything else can happen.
-
-  IDLE:  This is the actual energy-saving idle period. This may last
-    between 0 and infinite time, until a wake-up event occurs.
-
-  EXIT:  Period during which the CPU is brought back to operational
-    mode (EXEC).
-
-  entry-latency: Worst case latency required to enter the idle state. The
-  exit-latency may be guaranteed only after entry-latency has passed.
-
-  min-residency: Minimum period, including preparation and entry, for a given
-  idle state to be worthwhile energywise.
-
-  wakeup-latency: Maximum delay between the signaling of a wake-up event and the
-  CPU being able to execute normal code again. If not specified, this is assumed
-  to be entry-latency + exit-latency.
-
-  These timing parameters can be used by an OS in different circumstances.
-
-  An idle CPU requires the expected min-residency time to select the most
-  appropriate idle state based on the expected expiry time of the next IRQ
-  (i.e. wake-up) that causes the CPU to return to the EXEC phase.
-
-  An operating system scheduler may need to compute the shortest wake-up delay
-  for CPUs in the system by detecting how long will it take to get a CPU out
-  of an idle state, e.g.:
-
-  wakeup-delay = exit-latency + max(entry-latency - (now - entry-timestamp), 0)
-
-  In other words, the scheduler can make its scheduling decision by selecting
-  (e.g. waking-up) the CPU with the shortest wake-up delay.
-  The wake-up delay must take into account the entry latency if that period
-  has not expired. The abortable nature of the PREP period can be ignored
-  if it cannot be relied upon (e.g. the PREP deadline may occur much sooner than
-  the worst case since it depends on the CPU operating conditions, i.e. caches
-  state).
-
-  An OS has to reliably probe the wakeup-latency since some devices can enforce
-  latency constraint guarantees to work properly, so the OS has to detect the
-  worst case wake-up latency it can incur if a CPU is allowed to enter an
-  idle state, and possibly to prevent that to guarantee reliable device
-  functioning.
-
-  The min-residency time parameter deserves further explanation since it is
-  expressed in time units but must factor in energy consumption coefficients.
-
-  The energy consumption of a cpu when it enters a power state can be roughly
-  characterised by the following graph:
-
-                 |
-                 |
-                 |
-             e   |
-             n   |                                      /---
-             e   |                               /------
-             r   |                        /------
-             g   |                  /-----
-             y   |           /------
-                 |       ----
-                 |      /|
-                 |     / |
-                 |    /  |
-                 |   /   |
-                 |  /    |
-                 | /     |
-                 |/      |
-            -----|-------+----------------------------------
-                0|       1                              time(ms)
-
-      Graph 1: Energy vs time example
-
-  The graph is split in two parts delimited by time 1ms on the X-axis.
-  The graph curve with X-axis values = { x | 0 < x < 1ms } has a steep slope
-  and denotes the energy costs incurred while entering and leaving the idle
-  state.
-  The graph curve in the area delimited by X-axis values = {x | x > 1ms } has
-  shallower slope and essentially represents the energy consumption of the idle
-  state.
-
-  min-residency is defined for a given idle state as the minimum expected
-  residency time for a state (inclusive of preparation and entry) after
-  which choosing that state become the most energy efficient option. A good
-  way to visualise this, is by taking the same graph above and comparing some
-  states energy consumptions plots.
-
-  For sake of simplicity, let's consider a system with two idle states IDLE1,
-  and IDLE2:
-
-            |
-            |
-            |
-            |                                                  /-- IDLE1
-         e  |                                              /---
-         n  |                                         /----
-         e  |                                     /---
-         r  |                                /-----/--------- IDLE2
-         g  |                    /-------/---------
-         y  |        ------------    /---|
-            |       /           /----    |
-            |      /        /---         |
-            |     /    /----             |
-            |    / /---                  |
-            |   ---                      |
-            |  /                         |
-            | /                          |
-            |/                           |                  time
-         ---/----------------------------+------------------------
-            |IDLE1-energy < IDLE2-energy | IDLE2-energy < IDLE1-energy
-                                         |
-                                  IDLE2-min-residency
-
-      Graph 2: idle states min-residency example
-
-  In graph 2 above, that takes into account idle states entry/exit energy
-  costs, it is clear that if the idle state residency time (i.e. time till next
-  wake-up IRQ) is less than IDLE2-min-residency, IDLE1 is the better idle state
-  choice energywise.
-
-  This is mainly down to the fact that IDLE1 entry/exit energy costs are lower
-  than IDLE2.
-
-  However, the lower power consumption (i.e. shallower energy curve slope) of
-  idle state IDLE2 implies that after a suitable time, IDLE2 becomes more energy
-  efficient.
-
-  The time at which IDLE2 becomes more energy efficient than IDLE1 (and other
-  shallower states in a system with multiple idle states) is defined
-  IDLE2-min-residency and corresponds to the time when energy consumption of
-  IDLE1 and IDLE2 states breaks even.
-
-  The definitions provided in this section underpin the idle states
-  properties specification that is the subject of the following sections.
-
-  ===========================================
-  3 - idle-states node
-  ===========================================
-
-  ARM processor idle states are defined within the idle-states node, which is
-  a direct child of the cpus node [1] and provides a container where the
-  processor idle states, defined as device tree nodes, are listed.
-
-  On ARM systems, it is a container of processor idle states nodes. If the
-  system does not provide CPU power management capabilities, or the processor
-  just supports idle_standby, an idle-states node is not required.
-
-  ===========================================
-  4 - References
-  ===========================================
-
-  [1] ARM Linux Kernel documentation - CPUs bindings
-      Documentation/devicetree/bindings/arm/cpus.yaml
-
-  [2] ARM Linux Kernel documentation - PSCI bindings
-      Documentation/devicetree/bindings/arm/psci.yaml
-
-  [3] ARM Server Base System Architecture (SBSA)
-      http://infocenter.arm.com/help/index.jsp
-
-  [4] ARM Architecture Reference Manuals
-      http://infocenter.arm.com/help/index.jsp
-
-  [6] ARM Linux Kernel documentation - Booting AArch64 Linux
-      Documentation/arm64/booting.rst
-
-properties:
-  $nodename:
-    const: idle-states
-
-  entry-method:
-    description: |
-      Usage and definition depend on ARM architecture version.
-
-      On ARM v8 64-bit this property is required.
-      On ARM 32-bit systems this property is optional
-
-      This assumes that the "enable-method" property is set to "psci" in the cpu
-      node[6] that is responsible for setting up CPU idle management in the OS
-      implementation.
-    const: psci
-
-patternProperties:
-  "^(cpu|cluster)-":
-    type: object
-    description: |
-      Each state node represents an idle state description and must be defined
-      as follows.
-
-      The idle state entered by executing the wfi instruction (idle_standby
-      SBSA,[3][4]) is considered standard on all ARM platforms and therefore
-      must not be listed.
-
-      In addition to the properties listed above, a state node may require
-      additional properties specific to the entry-method defined in the
-      idle-states node. Please refer to the entry-method bindings
-      documentation for properties definitions.
-
-    properties:
-      compatible:
-        const: arm,idle-state
-
-      local-timer-stop:
-        description:
-          If present the CPU local timer control logic is
-             lost on state entry, otherwise it is retained.
-        type: boolean
-
-      entry-latency-us:
-        description:
-          Worst case latency in microseconds required to enter the idle state.
-
-      exit-latency-us:
-        description:
-          Worst case latency in microseconds required to exit the idle state.
-          The exit-latency-us duration may be guaranteed only after
-          entry-latency-us has passed.
-
-      min-residency-us:
-        description:
-          Minimum residency duration in microseconds, inclusive of preparation
-          and entry, for this idle state to be considered worthwhile energy wise
-          (refer to section 2 of this document for a complete description).
-
-      wakeup-latency-us:
-        description: |
-          Maximum delay between the signaling of a wake-up event and the CPU
-          being able to execute normal code again. If omitted, this is assumed
-          to be equal to:
-
-            entry-latency-us + exit-latency-us
-
-          It is important to supply this value on systems where the duration of
-          PREP phase (see diagram 1, section 2) is non-neglibigle. In such
-          systems entry-latency-us + exit-latency-us will exceed
-          wakeup-latency-us by this duration.
-
-      idle-state-name:
-        $ref: /schemas/types.yaml#/definitions/string
-        description:
-          A string used as a descriptive name for the idle state.
-
-    required:
-      - compatible
-      - entry-latency-us
-      - exit-latency-us
-      - min-residency-us
-
-additionalProperties: false
-
-examples:
-  - |
-
-    cpus {
-        #size-cells = <0>;
-        #address-cells = <2>;
-
-        cpu@0 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x0>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@1 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x1>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@100 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x100>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@101 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x101>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@10000 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x10000>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@10001 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x10001>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@10100 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x10100>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@10101 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a57";
-            reg = <0x0 0x10101>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
-                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
-        };
-
-        cpu@100000000 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x0>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        cpu@100000001 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x1>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        cpu@100000100 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x100>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        cpu@100000101 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x101>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        cpu@100010000 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x10000>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        cpu@100010001 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x10001>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        cpu@100010100 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x10100>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        cpu@100010101 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a53";
-            reg = <0x1 0x10101>;
-            enable-method = "psci";
-            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
-                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
-        };
-
-        idle-states {
-            entry-method = "psci";
-
-            CPU_RETENTION_0_0: cpu-retention-0-0 {
-                compatible = "arm,idle-state";
-                arm,psci-suspend-param = <0x0010000>;
-                entry-latency-us = <20>;
-                exit-latency-us = <40>;
-                min-residency-us = <80>;
-            };
-
-            CLUSTER_RETENTION_0: cluster-retention-0 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                arm,psci-suspend-param = <0x1010000>;
-                entry-latency-us = <50>;
-                exit-latency-us = <100>;
-                min-residency-us = <250>;
-                wakeup-latency-us = <130>;
-            };
-
-            CPU_SLEEP_0_0: cpu-sleep-0-0 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                arm,psci-suspend-param = <0x0010000>;
-                entry-latency-us = <250>;
-                exit-latency-us = <500>;
-                min-residency-us = <950>;
-            };
-
-            CLUSTER_SLEEP_0: cluster-sleep-0 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                arm,psci-suspend-param = <0x1010000>;
-                entry-latency-us = <600>;
-                exit-latency-us = <1100>;
-                min-residency-us = <2700>;
-                wakeup-latency-us = <1500>;
-            };
-
-            CPU_RETENTION_1_0: cpu-retention-1-0 {
-                compatible = "arm,idle-state";
-                arm,psci-suspend-param = <0x0010000>;
-                entry-latency-us = <20>;
-                exit-latency-us = <40>;
-                min-residency-us = <90>;
-            };
-
-            CLUSTER_RETENTION_1: cluster-retention-1 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                arm,psci-suspend-param = <0x1010000>;
-                entry-latency-us = <50>;
-                exit-latency-us = <100>;
-                min-residency-us = <270>;
-                wakeup-latency-us = <100>;
-            };
-
-            CPU_SLEEP_1_0: cpu-sleep-1-0 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                arm,psci-suspend-param = <0x0010000>;
-                entry-latency-us = <70>;
-                exit-latency-us = <100>;
-                min-residency-us = <300>;
-                wakeup-latency-us = <150>;
-            };
-
-            CLUSTER_SLEEP_1: cluster-sleep-1 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                arm,psci-suspend-param = <0x1010000>;
-                entry-latency-us = <500>;
-                exit-latency-us = <1200>;
-                min-residency-us = <3500>;
-                wakeup-latency-us = <1300>;
-            };
-        };
-    };
-
-  - |
-    // Example 2 (ARM 32-bit, 8-cpu system, two clusters):
-
-    cpus {
-        #size-cells = <0>;
-        #address-cells = <1>;
-
-        cpu@0 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a15";
-            reg = <0x0>;
-            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
-        };
-
-        cpu@1 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a15";
-            reg = <0x1>;
-            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
-        };
-
-        cpu@2 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a15";
-            reg = <0x2>;
-            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
-        };
-
-        cpu@3 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a15";
-            reg = <0x3>;
-            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
-        };
-
-        cpu@100 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a7";
-            reg = <0x100>;
-            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
-        };
-
-        cpu@101 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a7";
-            reg = <0x101>;
-            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
-        };
-
-        cpu@102 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a7";
-            reg = <0x102>;
-            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
-        };
-
-        cpu@103 {
-            device_type = "cpu";
-            compatible = "arm,cortex-a7";
-            reg = <0x103>;
-            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
-        };
-
-        idle-states {
-            cpu_sleep_0_0: cpu-sleep-0-0 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                entry-latency-us = <200>;
-                exit-latency-us = <100>;
-                min-residency-us = <400>;
-                wakeup-latency-us = <250>;
-            };
-
-            cluster_sleep_0: cluster-sleep-0 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                entry-latency-us = <500>;
-                exit-latency-us = <1500>;
-                min-residency-us = <2500>;
-                wakeup-latency-us = <1700>;
-            };
-
-            cpu_sleep_1_0: cpu-sleep-1-0 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                entry-latency-us = <300>;
-                exit-latency-us = <500>;
-                min-residency-us = <900>;
-                wakeup-latency-us = <600>;
-            };
-
-            cluster_sleep_1: cluster-sleep-1 {
-                compatible = "arm,idle-state";
-                local-timer-stop;
-                entry-latency-us = <800>;
-                exit-latency-us = <2000>;
-                min-residency-us = <6500>;
-                wakeup-latency-us = <2300>;
-            };
-        };
-    };
-
-...
index 6ce0b21..606b4b1 100644 (file)
@@ -81,4 +81,4 @@ Example:
                };
        };
 
-[1]. Documentation/devicetree/bindings/arm/idle-states.yaml
+[1]. Documentation/devicetree/bindings/cpu/idle-states.yaml
index 8b77cf8..dd83ef2 100644 (file)
@@ -101,7 +101,7 @@ properties:
       bindings in [1]) must specify this property.
 
       [1] Kernel documentation - ARM idle states bindings
-        Documentation/devicetree/bindings/arm/idle-states.yaml
+        Documentation/devicetree/bindings/cpu/idle-states.yaml
 
 patternProperties:
   "^power-domain-":
diff --git a/Documentation/devicetree/bindings/clock/apple,nco.yaml b/Documentation/devicetree/bindings/clock/apple,nco.yaml
new file mode 100644 (file)
index 0000000..74eab5c
--- /dev/null
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/apple,nco.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple SoCs' NCO block
+
+maintainers:
+  - Martin PoviÅ¡er <povik+lin@cutebit.org>
+
+description: |
+  The NCO (Numerically Controlled Oscillator) block found on Apple SoCs
+  such as the t8103 (M1) is a programmable clock generator performing
+  fractional division of a high frequency input clock.
+
+  It carries a number of independent channels and is typically used for
+  generation of audio bitclocks.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,t6000-nco
+          - apple,t8103-nco
+      - const: apple,nco
+
+  clocks:
+    description:
+      Specifies the reference clock from which the output clocks
+      are derived through fractional division.
+    maxItems: 1
+
+  '#clock-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - clocks
+  - '#clock-cells'
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    nco_clkref: clock-ref {
+      compatible = "fixed-clock";
+      #clock-cells = <0>;
+      clock-frequency = <900000000>;
+      clock-output-names = "nco-ref";
+    };
+
+    nco: clock-controller@23b044000 {
+      compatible = "apple,t8103-nco", "apple,nco";
+      reg = <0x3b044000 0x14000>;
+      #clock-cells = <1>;
+      clocks = <&nco_clkref>;
+    };
index 228c931..f0f9392 100644 (file)
@@ -61,16 +61,4 @@ examples:
         #clock-cells = <1>;
     };
 
-  # Example UART controller node that consumes clock generated by the clock controller:
-  - |
-    uart0: serial@58018000 {
-         compatible = "snps,dw-apb-uart";
-         reg = <0x58018000 0x2000>;
-         clocks = <&clk 45>, <&clk 46>;
-         clock-names = "baudclk", "apb_pclk";
-         interrupts = <0 9 4>;
-         reg-shift = <2>;
-         reg-io-width = <4>;
-    };
-
 ...
diff --git a/Documentation/devicetree/bindings/clock/cirrus,cs2000-cp.yaml b/Documentation/devicetree/bindings/clock/cirrus,cs2000-cp.yaml
new file mode 100644 (file)
index 0000000..0abd6ba
--- /dev/null
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/cirrus,cs2000-cp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier
+
+maintainers:
+  - Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+description: |
+  The CS2000-CP is an extremely versatile system clocking device that
+  utilizes a programmable phase lock loop.
+
+  Link: https://www.cirrus.com/products/cs2000/
+
+properties:
+  compatible:
+    enum:
+      - cirrus,cs2000-cp
+
+  clocks:
+    description:
+      Common clock binding for CLK_IN, XTI/REF_CLK
+    minItems: 2
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: clk_in
+      - const: ref_clk
+
+  '#clock-cells':
+    const: 0
+
+  reg:
+    maxItems: 1
+
+  cirrus,aux-output-source:
+    description:
+      Specifies the function of the auxiliary clock output pin
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum:
+      - 0 # CS2000CP_AUX_OUTPUT_REF_CLK:  ref_clk input
+      - 1 # CS2000CP_AUX_OUTPUT_CLK_IN:   clk_in input
+      - 2 # CS2000CP_AUX_OUTPUT_CLK_OUT:  clk_out output
+      - 3 # CS2000CP_AUX_OUTPUT_PLL_LOCK: pll lock status
+    default: 0
+
+  cirrus,clock-skip:
+    description:
+      This mode allows the PLL to maintain lock even when CLK_IN
+      has missing pulses for up to 20 ms.
+    $ref: /schemas/types.yaml#/definitions/flag
+
+  cirrus,dynamic-mode:
+    description:
+      In dynamic mode, the CLK_IN input is used to drive the
+      digital PLL of the silicon.
+      If not given, the static mode shall be used to derive the
+      output signal directly from the REF_CLK input.
+    $ref: /schemas/types.yaml#/definitions/flag
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/cirrus,cs2000-cp.h>
+
+    i2c@0 {
+      reg = <0x0 0x100>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      clock-controller@4f {
+        #clock-cells = <0>;
+        compatible = "cirrus,cs2000-cp";
+        reg = <0x4f>;
+        clocks = <&rcar_sound 0>, <&x12_clk>;
+        clock-names = "clk_in", "ref_clk";
+        cirrus,aux-output-source = <CS2000CP_AUX_OUTPUT_CLK_OUT>;
+      };
+    };
diff --git a/Documentation/devicetree/bindings/clock/cs2000-cp.txt b/Documentation/devicetree/bindings/clock/cs2000-cp.txt
deleted file mode 100644 (file)
index 54e6df0..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier
-
-Required properties:
-
-- compatible:          "cirrus,cs2000-cp"
-- reg:                 The chip select number on the I2C bus
-- clocks:              common clock binding for CLK_IN, XTI/REF_CLK
-- clock-names:         CLK_IN : clk_in, XTI/REF_CLK : ref_clk
-- #clock-cells:                must be <0>
-
-Example:
-
-&i2c2 {
-       ...
-       cs2000: clk_multiplier@4f {
-               #clock-cells = <0>;
-               compatible = "cirrus,cs2000-cp";
-               reg = <0x4f>;
-               clocks = <&rcar_sound 0>, <&x12_clk>;
-               clock-names = "clk_in", "ref_clk";
-       };
-};
index ffd6ae0..be66f1e 100644 (file)
@@ -191,11 +191,4 @@ examples:
         };
     };
 
-    /* Consumer referencing the 5P49V5923 pin OUT1 */
-    consumer {
-        /* ... */
-        clocks = <&vc5 1>;
-        /* ... */
-    };
-
 ...
index f4833a2..56f5247 100644 (file)
@@ -40,12 +40,3 @@ examples:
         compatible = "fsl,imx1-ccm";
         reg = <0x0021b000 0x1000>;
     };
-
-    pwm@208000 {
-        #pwm-cells = <2>;
-        compatible = "fsl,imx1-pwm";
-        reg = <0x00208000 0x1000>;
-        interrupts = <34>;
-        clocks = <&clks IMX1_CLK_DUMMY>, <&clks IMX1_CLK_PER1>;
-        clock-names = "ipg", "per";
-    };
index 518ad9a..e2d5054 100644 (file)
@@ -40,12 +40,3 @@ examples:
         reg = <0x10027000 0x800>;
         #clock-cells = <1>;
     };
-
-    serial@1000a000 {
-        compatible = "fsl,imx21-uart";
-        reg = <0x1000a000 0x1000>;
-        interrupts = <20>;
-        clocks = <&clks IMX21_CLK_UART1_IPG_GATE>,
-                 <&clks IMX21_CLK_PER1>;
-        clock-names = "ipg", "per";
-    };
index 5e296a0..7e890ab 100644 (file)
@@ -83,12 +83,3 @@ examples:
         reg = <0x80040000 0x2000>;
         #clock-cells = <1>;
     };
-
-    serial@8006c000 {
-        compatible = "fsl,imx23-auart";
-        reg = <0x8006c000 0x2000>;
-        interrupts = <24>;
-        clocks = <&clks 32>;
-        dmas = <&dma_apbx 6>, <&dma_apbx 7>;
-        dma-names = "rx", "tx";
-    };
index 2a2b107..1792e13 100644 (file)
@@ -176,11 +176,3 @@ examples:
         interrupts = <31>;
         #clock-cells = <1>;
     };
-
-    serial@43f90000 {
-        compatible = "fsl,imx25-uart", "fsl,imx21-uart";
-        reg = <0x43f90000 0x4000>;
-        interrupts = <45>;
-        clocks = <&clks 79>, <&clks 50>;
-        clock-names = "ipg", "per";
-    };
index 160268f..99925aa 100644 (file)
@@ -44,12 +44,3 @@ examples:
         interrupts = <31>;
         #clock-cells = <1>;
     };
-
-    serial@1000a000 {
-        compatible = "fsl,imx27-uart", "fsl,imx21-uart";
-        reg = <0x1000a000 0x1000>;
-        interrupts = <20>;
-        clocks = <&clks IMX27_CLK_UART1_IPG_GATE>,
-                 <&clks IMX27_CLK_PER1_GATE>;
-        clock-names = "ipg", "per";
-    };
index f831b78..a542d68 100644 (file)
@@ -106,12 +106,3 @@ examples:
         reg = <0x80040000 0x2000>;
         #clock-cells = <1>;
     };
-
-    serial@8006a000 {
-        compatible = "fsl,imx28-auart";
-        reg = <0x8006a000 0x2000>;
-        interrupts = <112>;
-        dmas = <&dma_apbx 8>, <&dma_apbx 9>;
-        dma-names = "rx", "tx";
-        clocks = <&clks 45>;
-    };
index d233626..168c8ad 100644 (file)
@@ -110,11 +110,3 @@ examples:
         interrupts = <31>, <53>;
         #clock-cells = <1>;
     };
-
-    serial@43f90000 {
-        compatible = "fsl,imx31-uart", "fsl,imx21-uart";
-        reg = <0x43f90000 0x4000>;
-        interrupts = <45>;
-        clocks = <&clks 10>, <&clks 30>;
-        clock-names = "ipg", "per";
-    };
index 3e20cca..6415bb6 100644 (file)
@@ -129,11 +129,3 @@ examples:
         interrupts = <31>;
         #clock-cells = <1>;
     };
-
-    mmc@53fb4000 {
-        compatible = "fsl,imx35-esdhc";
-        reg = <0x53fb4000 0x4000>;
-        interrupts = <7>;
-        clocks = <&clks 9>, <&clks 8>, <&clks 43>;
-        clock-names = "ipg", "ahb", "per";
-    };
index 7caf5ce..739c337 100644 (file)
@@ -108,14 +108,3 @@ examples:
                        "upll", "sosc_bus_clk", "firc_bus_clk",
                        "rosc", "spll_bus_clk";
     };
-
-    mmc@40380000 {
-        compatible = "fsl,imx7ulp-usdhc";
-        reg = <0x40380000 0x10000>;
-        interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
-        clocks = <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>,
-                 <&scg1 IMX7ULP_CLK_NIC1_DIV>,
-                 <&pcc2 IMX7ULP_CLK_USDHC1>;
-        clock-names ="ipg", "ahb", "per";
-        bus-width = <4>;
-    };
index ee8efb4..d06344d 100644 (file)
@@ -86,14 +86,3 @@ examples:
                       "firc", "upll";
         #clock-cells = <1>;
     };
-
-    mmc@40380000 {
-        compatible = "fsl,imx7ulp-usdhc";
-        reg = <0x40380000 0x10000>;
-        interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
-        clocks = <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>,
-                 <&scg1 IMX7ULP_CLK_NIC1_DIV>,
-                 <&pcc2 IMX7ULP_CLK_USDHC1>;
-        clock-names ="ipg", "ahb", "per";
-        bus-width = <4>;
-    };
index 0f6fe36..cb80105 100644 (file)
@@ -101,14 +101,3 @@ examples:
                              "sdhc0_lpcg_ahb_clk";
         power-domains = <&pd IMX_SC_R_SDHC_0>;
     };
-
-    mmc@5b010000 {
-        compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
-        interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
-        reg = <0x5b010000 0x10000>;
-        clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
-                 <&sdhc0_lpcg IMX_LPCG_CLK_5>,
-                 <&sdhc0_lpcg IMX_LPCG_CLK_0>;
-        clock-names = "ipg", "ahb", "per";
-        power-domains = <&pd IMX_SC_R_SDHC_0>;
-    };
diff --git a/Documentation/devicetree/bindings/clock/imx93-clock.yaml b/Documentation/devicetree/bindings/clock/imx93-clock.yaml
new file mode 100644 (file)
index 0000000..21a0619
--- /dev/null
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx93-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX93 Clock Control Module Binding
+
+maintainers:
+  - Peng Fan <peng.fan@nxp.com>
+
+description: |
+  i.MX93 clock control module is an integrated clock controller, which
+  includes clock generator, clock gate and supplies to all modules.
+
+properties:
+  compatible:
+    enum:
+      - fsl,imx93-ccm
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    description:
+      specify the external clocks used by the CCM module.
+    items:
+      - description: 32k osc
+      - description: 24m osc
+      - description: ext1 clock input
+
+  clock-names:
+    description:
+      specify the external clocks names used by the CCM module.
+    items:
+      - const: osc_32k
+      - const: osc_24m
+      - const: clk_ext1
+
+  '#clock-cells':
+    const: 1
+    description:
+      See include/dt-bindings/clock/imx93-clock.h for the full list of
+      i.MX93 clock IDs.
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  # Clock Control Module node:
+  - |
+    clock-controller@44450000 {
+        compatible = "fsl,imx93-ccm";
+        reg = <0x44450000 0x10000>;
+        #clock-cells = <1>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/imxrt1050-clock.yaml b/Documentation/devicetree/bindings/clock/imxrt1050-clock.yaml
new file mode 100644 (file)
index 0000000..03fc5c1
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imxrt1050-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MXRT
+
+maintainers:
+  - Giulio Benetti <giulio.benetti@benettiengineering.com>
+  - Jesse Taube <Mr.Bossman075@gmail.com>
+
+description: |
+  The clock consumer should specify the desired clock by having the clock
+  ID in its "clocks" phandle cell. See include/dt-bindings/clock/imxrt*-clock.h
+  for the full list of i.MXRT clock IDs.
+
+properties:
+  compatible:
+    const: fsl,imxrt1050-ccm
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 2
+
+  clocks:
+    description: 24m osc
+    maxItems: 1
+
+  clock-names:
+    const: osc
+
+  '#clock-cells':
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/imxrt1050-clock.h>
+
+    clks: clock-controller@400fc000 {
+        compatible = "fsl,imxrt1050-ccm";
+        reg = <0x400fc000 0x4000>;
+        interrupts = <95>, <96>;
+        clocks = <&osc>;
+        clock-names = "osc";
+        #clock-cells = <1>;
+    };
index ec7ab14..1b2181f 100644 (file)
@@ -106,10 +106,3 @@ examples:
         #clock-cells = <1>;
         #reset-cells = <1>;
     };
-
-    usb-controller@c5004000 {
-        compatible = "nvidia,tegra20-ehci";
-        reg = <0xc5004000 0x4000>;
-        clocks = <&car TEGRA124_CLK_USB2>;
-        resets = <&car TEGRA124_CLK_USB2>;
-    };
index f832abb..bee2dd4 100644 (file)
@@ -97,10 +97,3 @@ examples:
             power-domains = <&domain>;
         };
     };
-
-    usb-controller@c5004000 {
-        compatible = "nvidia,tegra20-ehci";
-        reg = <0xc5004000 0x4000>;
-        clocks = <&car TEGRA20_CLK_USB2>;
-        resets = <&car TEGRA20_CLK_USB2>;
-    };
index 8666e99..0e96f69 100644 (file)
@@ -10,7 +10,7 @@ maintainers:
   - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 
 description:
-  The A7 PLL on the Qualcomm platforms like SDX55 is used to provide high
+  The A7 PLL on the Qualcomm platforms like SDX55, SDX65 is used to provide high
   frequency clock to the CPU.
 
 properties:
diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
deleted file mode 100644 (file)
index c5eb669..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-Qualcomm Camera Clock & Reset Controller Binding
-------------------------------------------------
-
-Required properties :
-- compatible : shall contain "qcom,sdm845-camcc".
-- reg : shall contain base register location and length.
-- #clock-cells : from common clock binding, shall contain 1.
-- #reset-cells : from common reset binding, shall contain 1.
-- #power-domain-cells : from generic power domain binding, shall contain 1.
-
-Example:
-       camcc: clock-controller@ad00000 {
-               compatible = "qcom,sdm845-camcc";
-               reg = <0xad00000 0x10000>;
-               #clock-cells = <1>;
-               #reset-cells = <1>;
-               #power-domain-cells = <1>;
-       };
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml
new file mode 100644 (file)
index 0000000..7a03ef1
--- /dev/null
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,dispcc-sm6125.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock Controller Binding for SM6125
+
+maintainers:
+  - Martin Botka <martin.botka@somainline.org>
+
+description: |
+  Qualcomm display clock control module which supports the clocks and
+  power domains on SM6125.
+
+  See also:
+    dt-bindings/clock/qcom,dispcc-sm6125.h
+
+properties:
+  compatible:
+    enum:
+      - qcom,sm6125-dispcc
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: Byte clock from DSI PHY0
+      - description: Pixel clock from DSI PHY0
+      - description: Pixel clock from DSI PHY1
+      - description: Link clock from DP PHY
+      - description: VCO DIV clock from DP PHY
+      - description: AHB config clock from GCC
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: dsi0_phy_pll_out_byteclk
+      - const: dsi0_phy_pll_out_dsiclk
+      - const: dsi1_phy_pll_out_dsiclk
+      - const: dp_phy_pll_link_clk
+      - const: dp_phy_pll_vco_div_clk
+      - const: cfg_ahb_clk
+
+  '#clock-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmcc.h>
+    #include <dt-bindings/clock/qcom,gcc-sm6125.h>
+    clock-controller@5f00000 {
+      compatible = "qcom,sm6125-dispcc";
+      reg = <0x5f00000 0x20000>;
+      clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+               <&dsi0_phy 0>,
+               <&dsi0_phy 1>,
+               <&dsi1_phy 1>,
+               <&dp_phy 0>,
+               <&dp_phy 1>,
+               <&gcc GCC_DISP_AHB_CLK>;
+      clock-names = "bi_tcxo",
+                    "dsi0_phy_pll_out_byteclk",
+                    "dsi0_phy_pll_out_dsiclk",
+                    "dsi1_phy_pll_out_dsiclk",
+                    "dp_phy_pll_link_clk",
+                    "dp_phy_pll_vco_div_clk",
+                    "cfg_ahb_clk";
+      #clock-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6350.yaml b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6350.yaml
new file mode 100644 (file)
index 0000000..e706678
--- /dev/null
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,dispcc-sm6350.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller Binding for SM6350
+
+maintainers:
+  - Konrad Dybcio <konrad.dybcio@somainline.org>
+
+description: |
+  Qualcomm display clock control module which supports the clocks, resets and
+  power domains on SM6350.
+
+  See also dt-bindings/clock/qcom,dispcc-sm6350.h.
+
+properties:
+  compatible:
+    const: qcom,sm6350-dispcc
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: GPLL0 source from GCC
+      - description: Byte clock from DSI PHY
+      - description: Pixel clock from DSI PHY
+      - description: Link clock from DP PHY
+      - description: VCO DIV clock from DP PHY
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: gcc_disp_gpll0_clk
+      - const: dsi0_phy_pll_out_byteclk
+      - const: dsi0_phy_pll_out_dsiclk
+      - const: dp_phy_pll_link_clk
+      - const: dp_phy_pll_vco_div_clk
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-sm6350.h>
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@af00000 {
+      compatible = "qcom,sm6350-dispcc";
+      reg = <0x0af00000 0x20000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>,
+               <&gcc GCC_DISP_GPLL0_CLK>,
+               <&dsi_phy 0>,
+               <&dsi_phy 1>,
+               <&dp_phy 0>,
+               <&dp_phy 1>;
+      clock-names = "bi_tcxo",
+                    "gcc_disp_gpll0_clk",
+                    "dsi0_phy_pll_out_byteclk",
+                    "dsi0_phy_pll_out_dsiclk",
+                    "dp_phy_pll_link_clk",
+                    "dp_phy_pll_vco_div_clk";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
index 8e2eac6..9793641 100644 (file)
@@ -6,6 +6,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Qualcomm Global Clock & Reset Controller Binding for APQ8064
 
+allOf:
+  - $ref: qcom,gcc.yaml#
+
 maintainers:
   - Stephen Boyd <sboyd@kernel.org>
   - Taniya Das <tdas@codeaurora.org>
@@ -17,22 +20,12 @@ description: |
   See also:
   - dt-bindings/clock/qcom,gcc-msm8960.h
   - dt-bindings/reset/qcom,gcc-msm8960.h
+  - dt-bindings/clock/qcom,gcc-apq8084.h
+  - dt-bindings/reset/qcom,gcc-apq8084.h
 
 properties:
   compatible:
-    const: qcom,gcc-apq8064
-
-  '#clock-cells':
-    const: 1
-
-  '#reset-cells':
-    const: 1
-
-  '#power-domain-cells':
-    const: 1
-
-  reg:
-    maxItems: 1
+    const: qcom,gcc-apq8084
 
   nvmem-cells:
     minItems: 1
@@ -53,21 +46,13 @@ properties:
   '#thermal-sensor-cells':
     const: 1
 
-  protected-clocks:
-    description:
-      Protected clock specifier list as per common clock binding.
-
 required:
   - compatible
-  - reg
-  - '#clock-cells'
-  - '#reset-cells'
-  - '#power-domain-cells'
   - nvmem-cells
   - nvmem-cell-names
   - '#thermal-sensor-cells'
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8064.yaml
new file mode 100644 (file)
index 0000000..9eb91dd
--- /dev/null
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-ipq8064.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for IPQ8064
+
+allOf:
+  - $ref: qcom,gcc.yaml#
+
+maintainers:
+  - Ansuel Smith <ansuelsmth@gmail.com>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on IPQ8064.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
+  - dt-bindings/reset/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
+
+properties:
+  compatible:
+    items:
+      - const: qcom,gcc-ipq8064
+      - const: syscon
+
+  clocks:
+    items:
+      - description: PXO source
+      - description: CXO source
+
+  clock-names:
+    items:
+      - const: pxo
+      - const: cxo
+
+  thermal-sensor:
+    type: object
+
+    allOf:
+      - $ref: /schemas/thermal/qcom-tsens.yaml#
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    gcc: clock-controller@900000 {
+      compatible = "qcom,gcc-ipq8064", "syscon";
+      reg = <0x00900000 0x4000>;
+      clocks = <&pxo_board>, <&cxo_board>;
+      clock-names = "pxo", "cxo";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+
+      tsens: thermal-sensor {
+        compatible = "qcom,ipq8064-tsens";
+
+        nvmem-cells = <&tsens_calib>, <&tsens_calib_backup>;
+        nvmem-cell-names = "calib", "calib_backup";
+        interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "uplow";
+
+        #qcom,sensors = <11>;
+        #thermal-sensor-cells = <1>;
+      };
+    };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml
new file mode 100644 (file)
index 0000000..6c45e0f
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-other.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-ipq4019.h
+  - dt-bindings/clock/qcom,gcc-ipq6018.h
+  - dt-bindings/reset/qcom,gcc-ipq6018.h
+  - dt-bindings/clock/qcom,gcc-msm8939.h
+  - dt-bindings/clock/qcom,gcc-msm8953.h
+  - dt-bindings/reset/qcom,gcc-msm8939.h
+  - dt-bindings/clock/qcom,gcc-msm8660.h
+  - dt-bindings/reset/qcom,gcc-msm8660.h
+  - dt-bindings/clock/qcom,gcc-msm8974.h (qcom,gcc-msm8226 and qcom,gcc-msm8974)
+  - dt-bindings/reset/qcom,gcc-msm8974.h (qcom,gcc-msm8226 and qcom,gcc-msm8974)
+  - dt-bindings/clock/qcom,gcc-mdm9607.h
+  - dt-bindings/clock/qcom,gcc-mdm9615.h
+  - dt-bindings/reset/qcom,gcc-mdm9615.h
+  - dt-bindings/clock/qcom,gcc-sdm660.h  (qcom,gcc-sdm630 and qcom,gcc-sdm660)
+
+allOf:
+  - $ref: "qcom,gcc.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - qcom,gcc-ipq4019
+      - qcom,gcc-ipq6018
+      - qcom,gcc-mdm9607
+      - qcom,gcc-msm8226
+      - qcom,gcc-msm8660
+      - qcom,gcc-msm8916
+      - qcom,gcc-msm8939
+      - qcom,gcc-msm8953
+      - qcom,gcc-msm8960
+      - qcom,gcc-msm8974
+      - qcom,gcc-msm8974pro
+      - qcom,gcc-msm8974pro-ac
+      - qcom,gcc-mdm9615
+      - qcom,gcc-sdm630
+      - qcom,gcc-sdm660
+
+required:
+  - compatible
+
+unevaluatedProperties: false
+
+examples:
+  # Example for GCC for MSM8960:
+  - |
+    clock-controller@900000 {
+      compatible = "qcom,gcc-msm8960";
+      reg = <0x900000 0x4000>;
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
index f66d703..2ed27a2 100644 (file)
@@ -4,57 +4,17 @@
 $id: http://devicetree.org/schemas/clock/qcom,gcc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: Qualcomm Global Clock & Reset Controller Binding
+title: Qualcomm Global Clock & Reset Controller Binding Common Bindings
 
 maintainers:
   - Stephen Boyd <sboyd@kernel.org>
   - Taniya Das <tdas@codeaurora.org>
 
 description: |
-  Qualcomm global clock control module which supports the clocks, resets and
-  power domains.
-
-  See also:
-  - dt-bindings/clock/qcom,gcc-apq8084.h
-  - dt-bindings/reset/qcom,gcc-apq8084.h
-  - dt-bindings/clock/qcom,gcc-ipq4019.h
-  - dt-bindings/clock/qcom,gcc-ipq6018.h
-  - dt-bindings/reset/qcom,gcc-ipq6018.h
-  - dt-bindings/clock/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
-  - dt-bindings/reset/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
-  - dt-bindings/clock/qcom,gcc-msm8939.h
-  - dt-bindings/clock/qcom,gcc-msm8953.h
-  - dt-bindings/reset/qcom,gcc-msm8939.h
-  - dt-bindings/clock/qcom,gcc-msm8660.h
-  - dt-bindings/reset/qcom,gcc-msm8660.h
-  - dt-bindings/clock/qcom,gcc-msm8974.h (qcom,gcc-msm8226 and qcom,gcc-msm8974)
-  - dt-bindings/reset/qcom,gcc-msm8974.h (qcom,gcc-msm8226 and qcom,gcc-msm8974)
-  - dt-bindings/clock/qcom,gcc-mdm9607.h
-  - dt-bindings/clock/qcom,gcc-mdm9615.h
-  - dt-bindings/reset/qcom,gcc-mdm9615.h
-  - dt-bindings/clock/qcom,gcc-sdm660.h  (qcom,gcc-sdm630 and qcom,gcc-sdm660)
+  Common bindings for Qualcomm global clock control module which supports
+  the clocks, resets and power domains.
 
 properties:
-  compatible:
-    enum:
-      - qcom,gcc-apq8084
-      - qcom,gcc-ipq4019
-      - qcom,gcc-ipq6018
-      - qcom,gcc-ipq8064
-      - qcom,gcc-mdm9607
-      - qcom,gcc-msm8226
-      - qcom,gcc-msm8660
-      - qcom,gcc-msm8916
-      - qcom,gcc-msm8939
-      - qcom,gcc-msm8953
-      - qcom,gcc-msm8960
-      - qcom,gcc-msm8974
-      - qcom,gcc-msm8974pro
-      - qcom,gcc-msm8974pro-ac
-      - qcom,gcc-mdm9615
-      - qcom,gcc-sdm630
-      - qcom,gcc-sdm660
-
   '#clock-cells':
     const: 1
 
@@ -72,22 +32,11 @@ properties:
       Protected clock specifier list as per common clock binding.
 
 required:
-  - compatible
   - reg
   - '#clock-cells'
   - '#reset-cells'
   - '#power-domain-cells'
 
-additionalProperties: false
+additionalProperties: true
 
-examples:
-  # Example for GCC for MSM8960:
-  - |
-    clock-controller@900000 {
-      compatible = "qcom,gcc-msm8960";
-      reg = <0x900000 0x4000>;
-      #clock-cells = <1>;
-      #reset-cells = <1>;
-      #power-domain-cells = <1>;
-    };
 ...
index 46dff46..9ebcb19 100644 (file)
@@ -17,6 +17,7 @@ description: |
     dt-bindings/clock/qcom,gpucc-sdm845.h
     dt-bindings/clock/qcom,gpucc-sc7180.h
     dt-bindings/clock/qcom,gpucc-sc7280.h
+    dt-bindings/clock/qcom,gpucc-sm6350.h
     dt-bindings/clock/qcom,gpucc-sm8150.h
     dt-bindings/clock/qcom,gpucc-sm8250.h
 
@@ -27,6 +28,7 @@ properties:
       - qcom,sc7180-gpucc
       - qcom,sc7280-gpucc
       - qcom,sc8180x-gpucc
+      - qcom,sm6350-gpucc
       - qcom,sm8150-gpucc
       - qcom,sm8250-gpucc
 
index 68fdc3d..4b79e89 100644 (file)
@@ -19,6 +19,7 @@ properties:
     enum:
       - qcom,mmcc-apq8064
       - qcom,mmcc-apq8084
+      - qcom,mmcc-msm8226
       - qcom,mmcc-msm8660
       - qcom,mmcc-msm8960
       - qcom,mmcc-msm8974
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcm2290-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcm2290-dispcc.yaml
new file mode 100644 (file)
index 0000000..973e408
--- /dev/null
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,qcm2290-dispcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller Binding for qcm2290
+
+maintainers:
+  - Loic Poulain <loic.poulain@linaro.org>
+
+description: |
+  Qualcomm display clock control module which supports the clocks, resets and
+  power domains on qcm2290.
+
+  See also dt-bindings/clock/qcom,dispcc-qcm2290.h.
+
+properties:
+  compatible:
+    const: qcom,qcm2290-dispcc
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: Board active-only XO source
+      - description: GPLL0 source from GCC
+      - description: GPLL0 div source from GCC
+      - description: Byte clock from DSI PHY
+      - description: Pixel clock from DSI PHY
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: bi_tcxo_ao
+      - const: gcc_disp_gpll0_clk_src
+      - const: gcc_disp_gpll0_div_clk_src
+      - const: dsi0_phy_pll_out_byteclk
+      - const: dsi0_phy_pll_out_dsiclk
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,dispcc-qcm2290.h>
+    #include <dt-bindings/clock/qcom,gcc-qcm2290.h>
+    #include <dt-bindings/clock/qcom,rpmcc.h>
+    clock-controller@5f00000 {
+            compatible = "qcom,qcm2290-dispcc";
+            reg = <0x5f00000 0x20000>;
+            clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+                     <&rpmcc RPM_SMD_XO_A_CLK_SRC>,
+                     <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+                     <&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
+                     <&dsi0_phy 0>,
+                     <&dsi0_phy 1>;
+            clock-names = "bi_tcxo",
+                          "bi_tcxo_ao",
+                          "gcc_disp_gpll0_clk_src",
+                          "gcc_disp_gpll0_div_clk_src",
+                          "dsi0_phy_pll_out_byteclk",
+                          "dsi0_phy_pll_out_dsiclk";
+            #clock-cells = <1>;
+            #reset-cells = <1>;
+            #power-domain-cells = <1>;
+    };
+...
index 8406dde..8fcaf41 100644 (file)
@@ -20,6 +20,7 @@ properties:
       - qcom,sc7180-rpmh-clk
       - qcom,sc7280-rpmh-clk
       - qcom,sc8180x-rpmh-clk
+      - qcom,sc8280xp-rpmh-clk
       - qcom,sdm845-rpmh-clk
       - qcom,sdx55-rpmh-clk
       - qcom,sdx65-rpmh-clk
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-camcc.yaml
new file mode 100644 (file)
index 0000000..d4239cc
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sdm845-camcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Camera Clock & Reset Controller Binding for SDM845
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+  Qualcomm camera clock control module which supports the clocks, resets and
+  power domains on SDM845.
+
+  See also dt-bindings/clock/qcom,camcc-sdm845.h
+
+properties:
+  compatible:
+    const: qcom,sdm845-camcc
+
+  clocks:
+    items:
+      - description: Board XO source
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@ad00000 {
+      compatible = "qcom,sdm845-camcc";
+      reg = <0x0ad00000 0x10000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>;
+      clock-names = "bi_tcxo";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/renesas,9series.yaml b/Documentation/devicetree/bindings/clock/renesas,9series.yaml
new file mode 100644 (file)
index 0000000..102eb95
--- /dev/null
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/renesas,9series.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for Renesas 9-series I2C PCIe clock generators
+
+description: |
+  The Renesas 9-series are I2C PCIe clock generators providing
+  from 1 to 20 output clocks.
+
+  When referencing the provided clock in the DT using phandle
+  and clock specifier, the following mapping applies:
+
+  - 9FGV0241:
+    0 -- DIF0
+    1 -- DIF1
+
+maintainers:
+  - Marek Vasut <marex@denx.de>
+
+properties:
+  compatible:
+    enum:
+      - renesas,9fgv0241
+
+  reg:
+    description: I2C device address
+    enum: [ 0x68, 0x6a ]
+
+  '#clock-cells':
+    const: 1
+
+  clocks:
+    items:
+      - description: XTal input clock
+
+  renesas,out-amplitude-microvolt:
+    enum: [ 600000, 700000, 800000, 900000 ]
+    description: Output clock signal amplitude
+
+  renesas,out-spread-spectrum:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [ 100000, 99750, 99500 ]
+    description: Output clock down spread in pcm (1/1000 of percent)
+
+patternProperties:
+  "^DIF[0-19]$":
+    type: object
+    description:
+      Description of one of the outputs (DIF0..DIF19).
+
+    properties:
+      renesas,slew-rate:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [ 2000000, 3000000 ]
+        description: Output clock slew rate select in V/ns
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    /* 25MHz reference crystal */
+    ref25: ref25m {
+        compatible = "fixed-clock";
+        #clock-cells = <0>;
+        clock-frequency = <25000000>;
+    };
+
+    i2c@0 {
+        reg = <0x0 0x100>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        rs9: clock-generator@6a {
+            compatible = "renesas,9fgv0241";
+            reg = <0x6a>;
+            #clock-cells = <1>;
+
+            clocks = <&ref25m>;
+
+            DIF0 {
+                renesas,slew-rate = <3000000>;
+            };
+        };
+    };
+
+...
index c55a7c4..2197c95 100644 (file)
@@ -51,6 +51,18 @@ additionalProperties: false
 examples:
   - |
     #include <dt-bindings/clock/r8a73a4-clock.h>
+
+    cpg_clocks: cpg_clocks@e6150000 {
+            compatible = "renesas,r8a73a4-cpg-clocks";
+            reg = <0xe6150000 0x10000>;
+            clocks = <&extal1_clk>, <&extal2_clk>;
+            #clock-cells = <1>;
+            clock-output-names = "main", "pll0", "pll1", "pll2",
+                                  "pll2s", "pll2h", "z", "z2",
+                                  "i", "m3", "b", "m1", "m2",
+                                  "zx", "zs", "hp";
+    };
+
     sdhi2_clk: sdhi2_clk@e615007c {
             compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
             reg = <0xe615007c 4>;
index 30b2e3d..bd3af8f 100644 (file)
@@ -4,13 +4,13 @@
 $id: "http://devicetree.org/schemas/clock/renesas,rzg2l-cpg.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
-title: Renesas RZ/G2L Clock Pulse Generator / Module Standby Mode
+title: Renesas RZ/{G2L,V2L} Clock Pulse Generator / Module Standby Mode
 
 maintainers:
   - Geert Uytterhoeven <geert+renesas@glider.be>
 
 description: |
-  On Renesas RZ/G2L SoC, the CPG (Clock Pulse Generator) and Module
+  On Renesas RZ/{G2L,V2L} SoC, the CPG (Clock Pulse Generator) and Module
   Standby Mode share the same register block.
 
   They provide the following functionalities:
@@ -22,7 +22,9 @@ description: |
 
 properties:
   compatible:
-    const: renesas,r9a07g044-cpg  # RZ/G2{L,LC}
+    enum:
+      - renesas,r9a07g044-cpg  # RZ/G2{L,LC}
+      - renesas,r9a07g054-cpg  # RZ/V2L
 
   reg:
     maxItems: 1
@@ -40,9 +42,9 @@ properties:
     description: |
       - For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
         and a core clock reference, as defined in
-        <dt-bindings/clock/r9a07g044-cpg.h>
+        <dt-bindings/clock/r9a07g*-cpg.h>
       - For module clocks, the two clock specifier cells must be "CPG_MOD" and
-        a module number, as defined in the <dt-bindings/clock/r9a07g044-cpg.h>.
+        a module number, as defined in the <dt-bindings/clock/r9a07g0*-cpg.h>.
     const: 2
 
   '#power-domain-cells':
@@ -56,7 +58,7 @@ properties:
   '#reset-cells':
     description:
       The single reset specifier cell must be the module number, as defined in
-      the <dt-bindings/clock/r9a07g044-cpg.h>.
+      the <dt-bindings/clock/r9a07g0*-cpg.h>.
     const: 1
 
 required:
diff --git a/Documentation/devicetree/bindings/clock/starfive,jh7100-audclk.yaml b/Documentation/devicetree/bindings/clock/starfive,jh7100-audclk.yaml
new file mode 100644 (file)
index 0000000..8f49a1a
--- /dev/null
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/starfive,jh7100-audclk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive JH7100 Audio Clock Generator
+
+maintainers:
+  - Emil Renner Berthing <kernel@esmil.dk>
+
+properties:
+  compatible:
+    const: starfive,jh7100-audclk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Audio source clock
+      - description: External 12.288MHz clock
+      - description: Domain 7 AHB bus clock
+
+  clock-names:
+    items:
+      - const: audio_src
+      - const: audio_12288
+      - const: dom7ahb_bus
+
+  '#clock-cells':
+    const: 1
+    description:
+      See <dt-bindings/clock/starfive-jh7100-audio.h> for valid indices.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/starfive-jh7100.h>
+
+    clock-controller@10480000 {
+            compatible = "starfive,jh7100-audclk";
+            reg = <0x10480000 0x10000>;
+            clocks = <&clkgen JH7100_CLK_AUDIO_SRC>,
+                     <&clkgen JH7100_CLK_AUDIO_12288>,
+                     <&clkgen JH7100_CLK_DOM7AHB_BUS>;
+            clock-names = "audio_src", "audio_12288", "dom7ahb_bus";
+            #clock-cells = <1>;
+    };
diff --git a/Documentation/devicetree/bindings/cpu/idle-states.yaml b/Documentation/devicetree/bindings/cpu/idle-states.yaml
new file mode 100644 (file)
index 0000000..5daa219
--- /dev/null
@@ -0,0 +1,855 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cpu/idle-states.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Idle states binding description
+
+maintainers:
+  - Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+  - Anup Patel <anup@brainfault.org>
+
+description: |+
+  ==========================================
+  1 - Introduction
+  ==========================================
+
+  ARM and RISC-V systems contain HW capable of managing power consumption
+  dynamically, where cores can be put in different low-power states (ranging
+  from simple wfi to power gating) according to OS PM policies. The CPU states
+  representing the range of dynamic idle states that a processor can enter at
+  run-time, can be specified through device tree bindings representing the
+  parameters required to enter/exit specific idle states on a given processor.
+
+  ==========================================
+  2 - ARM idle states
+  ==========================================
+
+  According to the Server Base System Architecture document (SBSA, [3]), the
+  power states an ARM CPU can be put into are identified by the following list:
+
+  - Running
+  - Idle_standby
+  - Idle_retention
+  - Sleep
+  - Off
+
+  The power states described in the SBSA document define the basic CPU states on
+  top of which ARM platforms implement power management schemes that allow an OS
+  PM implementation to put the processor in different idle states (which include
+  states listed above; "off" state is not an idle state since it does not have
+  wake-up capabilities, hence it is not considered in this document).
+
+  Idle state parameters (e.g. entry latency) are platform specific and need to
+  be characterized with bindings that provide the required information to OS PM
+  code so that it can build the required tables and use them at runtime.
+
+  The device tree binding definition for ARM idle states is the subject of this
+  document.
+
+  ==========================================
+  3 - RISC-V idle states
+  ==========================================
+
+  On RISC-V systems, the HARTs (or CPUs) [6] can be put in platform specific
+  suspend (or idle) states (ranging from simple WFI, power gating, etc). The
+  RISC-V SBI v0.3 (or higher) [7] hart state management extension provides a
+  standard mechanism for OS to request HART state transitions.
+
+  The platform specific suspend (or idle) states of a hart can be either
+  retentive or non-retentive in nature. A retentive suspend state will
+  preserve HART registers and CSR values for all privilege modes whereas
+  a non-retentive suspend state will not preserve HART registers and CSR
+  values.
+
+  ===========================================
+  4 - idle-states definitions
+  ===========================================
+
+  Idle states are characterized for a specific system through a set of
+  timing and energy related properties, that underline the HW behaviour
+  triggered upon idle states entry and exit.
+
+  The following diagram depicts the CPU execution phases and related timing
+  properties required to enter and exit an idle state:
+
+  ..__[EXEC]__|__[PREP]__|__[ENTRY]__|__[IDLE]__|__[EXIT]__|__[EXEC]__..
+              |          |           |          |          |
+
+              |<------ entry ------->|
+              |       latency        |
+                                                |<- exit ->|
+                                                |  latency |
+              |<-------- min-residency -------->|
+                         |<-------  wakeup-latency ------->|
+
+      Diagram 1: CPU idle state execution phases
+
+  EXEC:  Normal CPU execution.
+
+  PREP:  Preparation phase before committing the hardware to idle mode
+    like cache flushing. This is abortable on pending wake-up
+    event conditions. The abort latency is assumed to be negligible
+    (i.e. less than the ENTRY + EXIT duration). If aborted, CPU
+    goes back to EXEC. This phase is optional. If not abortable,
+    this should be included in the ENTRY phase instead.
+
+  ENTRY:  The hardware is committed to idle mode. This period must run
+    to completion up to IDLE before anything else can happen.
+
+  IDLE:  This is the actual energy-saving idle period. This may last
+    between 0 and infinite time, until a wake-up event occurs.
+
+  EXIT:  Period during which the CPU is brought back to operational
+    mode (EXEC).
+
+  entry-latency: Worst case latency required to enter the idle state. The
+  exit-latency may be guaranteed only after entry-latency has passed.
+
+  min-residency: Minimum period, including preparation and entry, for a given
+  idle state to be worthwhile energywise.
+
+  wakeup-latency: Maximum delay between the signaling of a wake-up event and the
+  CPU being able to execute normal code again. If not specified, this is assumed
+  to be entry-latency + exit-latency.
+
+  These timing parameters can be used by an OS in different circumstances.
+
+  An idle CPU requires the expected min-residency time to select the most
+  appropriate idle state based on the expected expiry time of the next IRQ
+  (i.e. wake-up) that causes the CPU to return to the EXEC phase.
+
+  An operating system scheduler may need to compute the shortest wake-up delay
+  for CPUs in the system by detecting how long will it take to get a CPU out
+  of an idle state, e.g.:
+
+  wakeup-delay = exit-latency + max(entry-latency - (now - entry-timestamp), 0)
+
+  In other words, the scheduler can make its scheduling decision by selecting
+  (e.g. waking-up) the CPU with the shortest wake-up delay.
+  The wake-up delay must take into account the entry latency if that period
+  has not expired. The abortable nature of the PREP period can be ignored
+  if it cannot be relied upon (e.g. the PREP deadline may occur much sooner than
+  the worst case since it depends on the CPU operating conditions, i.e. caches
+  state).
+
+  An OS has to reliably probe the wakeup-latency since some devices can enforce
+  latency constraint guarantees to work properly, so the OS has to detect the
+  worst case wake-up latency it can incur if a CPU is allowed to enter an
+  idle state, and possibly to prevent that to guarantee reliable device
+  functioning.
+
+  The min-residency time parameter deserves further explanation since it is
+  expressed in time units but must factor in energy consumption coefficients.
+
+  The energy consumption of a cpu when it enters a power state can be roughly
+  characterised by the following graph:
+
+                 |
+                 |
+                 |
+             e   |
+             n   |                                      /---
+             e   |                               /------
+             r   |                        /------
+             g   |                  /-----
+             y   |           /------
+                 |       ----
+                 |      /|
+                 |     / |
+                 |    /  |
+                 |   /   |
+                 |  /    |
+                 | /     |
+                 |/      |
+            -----|-------+----------------------------------
+                0|       1                              time(ms)
+
+      Graph 1: Energy vs time example
+
+  The graph is split in two parts delimited by time 1ms on the X-axis.
+  The graph curve with X-axis values = { x | 0 < x < 1ms } has a steep slope
+  and denotes the energy costs incurred while entering and leaving the idle
+  state.
+  The graph curve in the area delimited by X-axis values = {x | x > 1ms } has
+  shallower slope and essentially represents the energy consumption of the idle
+  state.
+
+  min-residency is defined for a given idle state as the minimum expected
+  residency time for a state (inclusive of preparation and entry) after
+  which choosing that state become the most energy efficient option. A good
+  way to visualise this, is by taking the same graph above and comparing some
+  states energy consumptions plots.
+
+  For sake of simplicity, let's consider a system with two idle states IDLE1,
+  and IDLE2:
+
+            |
+            |
+            |
+            |                                                  /-- IDLE1
+         e  |                                              /---
+         n  |                                         /----
+         e  |                                     /---
+         r  |                                /-----/--------- IDLE2
+         g  |                    /-------/---------
+         y  |        ------------    /---|
+            |       /           /----    |
+            |      /        /---         |
+            |     /    /----             |
+            |    / /---                  |
+            |   ---                      |
+            |  /                         |
+            | /                          |
+            |/                           |                  time
+         ---/----------------------------+------------------------
+            |IDLE1-energy < IDLE2-energy | IDLE2-energy < IDLE1-energy
+                                         |
+                                  IDLE2-min-residency
+
+      Graph 2: idle states min-residency example
+
+  In graph 2 above, that takes into account idle states entry/exit energy
+  costs, it is clear that if the idle state residency time (i.e. time till next
+  wake-up IRQ) is less than IDLE2-min-residency, IDLE1 is the better idle state
+  choice energywise.
+
+  This is mainly down to the fact that IDLE1 entry/exit energy costs are lower
+  than IDLE2.
+
+  However, the lower power consumption (i.e. shallower energy curve slope) of
+  idle state IDLE2 implies that after a suitable time, IDLE2 becomes more energy
+  efficient.
+
+  The time at which IDLE2 becomes more energy efficient than IDLE1 (and other
+  shallower states in a system with multiple idle states) is defined
+  IDLE2-min-residency and corresponds to the time when energy consumption of
+  IDLE1 and IDLE2 states breaks even.
+
+  The definitions provided in this section underpin the idle states
+  properties specification that is the subject of the following sections.
+
+  ===========================================
+  5 - idle-states node
+  ===========================================
+
+  The processor idle states are defined within the idle-states node, which is
+  a direct child of the cpus node [1] and provides a container where the
+  processor idle states, defined as device tree nodes, are listed.
+
+  On ARM systems, it is a container of processor idle states nodes. If the
+  system does not provide CPU power management capabilities, or the processor
+  just supports idle_standby, an idle-states node is not required.
+
+  ===========================================
+  6 - References
+  ===========================================
+
+  [1] ARM Linux Kernel documentation - CPUs bindings
+      Documentation/devicetree/bindings/arm/cpus.yaml
+
+  [2] ARM Linux Kernel documentation - PSCI bindings
+      Documentation/devicetree/bindings/arm/psci.yaml
+
+  [3] ARM Server Base System Architecture (SBSA)
+      http://infocenter.arm.com/help/index.jsp
+
+  [4] ARM Architecture Reference Manuals
+      http://infocenter.arm.com/help/index.jsp
+
+  [5] ARM Linux Kernel documentation - Booting AArch64 Linux
+      Documentation/arm64/booting.rst
+
+  [6] RISC-V Linux Kernel documentation - CPUs bindings
+      Documentation/devicetree/bindings/riscv/cpus.yaml
+
+  [7] RISC-V Supervisor Binary Interface (SBI)
+      http://github.com/riscv/riscv-sbi-doc/riscv-sbi.adoc
+
+properties:
+  $nodename:
+    const: idle-states
+
+  entry-method:
+    description: |
+      Usage and definition depend on ARM architecture version.
+
+      On ARM v8 64-bit this property is required.
+      On ARM 32-bit systems this property is optional.
+
+      This assumes that the "enable-method" property is set to "psci" in the cpu
+      node[5] that is responsible for setting up CPU idle management in the OS
+      implementation.
+    const: psci
+
+patternProperties:
+  "^(cpu|cluster)-":
+    type: object
+    description: |
+      Each state node represents an idle state description and must be defined
+      as follows.
+
+      The idle state entered by executing the wfi instruction (idle_standby
+      SBSA,[3][4]) is considered standard on all ARM and RISC-V platforms and
+      therefore must not be listed.
+
+      In addition to the properties listed above, a state node may require
+      additional properties specific to the entry-method defined in the
+      idle-states node. Please refer to the entry-method bindings
+      documentation for properties definitions.
+
+    properties:
+      compatible:
+        enum:
+          - arm,idle-state
+          - riscv,idle-state
+
+      arm,psci-suspend-param:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: |
+          power_state parameter to pass to the ARM PSCI suspend call.
+
+          Device tree nodes that require usage of PSCI CPU_SUSPEND function
+          (i.e. idle states node with entry-method property is set to "psci")
+          must specify this property.
+
+      riscv,sbi-suspend-param:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: |
+          suspend_type parameter to pass to the RISC-V SBI HSM suspend call.
+
+          This property is required in idle state nodes of device tree meant
+          for RISC-V systems. For more details on the suspend_type parameter
+          refer to the SBI specification v0.3 (or higher) [7].
+
+      local-timer-stop:
+        description:
+          If present the CPU local timer control logic is
+          lost on state entry, otherwise it is retained.
+        type: boolean
+
+      entry-latency-us:
+        description:
+          Worst case latency in microseconds required to enter the idle state.
+
+      exit-latency-us:
+        description:
+          Worst case latency in microseconds required to exit the idle state.
+          The exit-latency-us duration may be guaranteed only after
+          entry-latency-us has passed.
+
+      min-residency-us:
+        description:
+          Minimum residency duration in microseconds, inclusive of preparation
+          and entry, for this idle state to be considered worthwhile energy wise
+          (refer to section 4 of this document for a complete description).
+
+      wakeup-latency-us:
+        description: |
+          Maximum delay between the signaling of a wake-up event and the CPU
+          being able to execute normal code again. If omitted, this is assumed
+          to be equal to:
+
+            entry-latency-us + exit-latency-us
+
+          It is important to supply this value on systems where the duration of
+          PREP phase (see diagram 1, section 4) is non-negligible. In such
+          systems entry-latency-us + exit-latency-us will exceed
+          wakeup-latency-us by this duration.
+
+      idle-state-name:
+        $ref: /schemas/types.yaml#/definitions/string
+        description:
+          A string used as a descriptive name for the idle state.
+
+    additionalProperties: false
+
+    required:
+      - compatible
+      - entry-latency-us
+      - exit-latency-us
+      - min-residency-us
+
+additionalProperties: false
+
+examples:
+  - |
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <2>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x0>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x1>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10000 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10000>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10001 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10001>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0>, <&CPU_SLEEP_0_0>,
+                    <&CLUSTER_RETENTION_0>, <&CLUSTER_SLEEP_0>;
+        };
+
+        cpu@100000000 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x0>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100000001 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x1>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100000100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100000101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010000 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10000>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010001 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10001>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0>, <&CPU_SLEEP_1_0>,
+                    <&CLUSTER_RETENTION_1>, <&CLUSTER_SLEEP_1>;
+        };
+
+        idle-states {
+            entry-method = "psci";
+
+            CPU_RETENTION_0_0: cpu-retention-0-0 {
+                compatible = "arm,idle-state";
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CLUSTER_RETENTION_0: cluster-retention-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CPU_SLEEP_0_0: cpu-sleep-0-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_SLEEP_0: cluster-sleep-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+
+            CPU_RETENTION_1_0: cpu-retention-1-0 {
+                compatible = "arm,idle-state";
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <90>;
+            };
+
+            CLUSTER_RETENTION_1: cluster-retention-1 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <270>;
+                wakeup-latency-us = <100>;
+            };
+
+            CPU_SLEEP_1_0: cpu-sleep-1-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <70>;
+                exit-latency-us = <100>;
+                min-residency-us = <300>;
+                wakeup-latency-us = <150>;
+            };
+
+            CLUSTER_SLEEP_1: cluster-sleep-1 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <500>;
+                exit-latency-us = <1200>;
+                min-residency-us = <3500>;
+                wakeup-latency-us = <1300>;
+            };
+        };
+    };
+
+  - |
+    // Example 2 (ARM 32-bit, 8-cpu system, two clusters):
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <1>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x0>;
+            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x1>;
+            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
+        };
+
+        cpu@2 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x2>;
+            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
+        };
+
+        cpu@3 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x3>;
+            cpu-idle-states = <&cpu_sleep_0_0>, <&cluster_sleep_0>;
+        };
+
+        cpu@100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x100>;
+            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
+        };
+
+        cpu@101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x101>;
+            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
+        };
+
+        cpu@102 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x102>;
+            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
+        };
+
+        cpu@103 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x103>;
+            cpu-idle-states = <&cpu_sleep_1_0>, <&cluster_sleep_1>;
+        };
+
+        idle-states {
+            cpu_sleep_0_0: cpu-sleep-0-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <200>;
+                exit-latency-us = <100>;
+                min-residency-us = <400>;
+                wakeup-latency-us = <250>;
+            };
+
+            cluster_sleep_0: cluster-sleep-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <500>;
+                exit-latency-us = <1500>;
+                min-residency-us = <2500>;
+                wakeup-latency-us = <1700>;
+            };
+
+            cpu_sleep_1_0: cpu-sleep-1-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <300>;
+                exit-latency-us = <500>;
+                min-residency-us = <900>;
+                wakeup-latency-us = <600>;
+            };
+
+            cluster_sleep_1: cluster-sleep-1 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <800>;
+                exit-latency-us = <2000>;
+                min-residency-us = <6500>;
+                wakeup-latency-us = <2300>;
+            };
+        };
+    };
+
+  - |
+    // Example 3 (RISC-V 64-bit, 4-cpu systems, two clusters):
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <1>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x0>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_0_0 &CPU_NONRET_0_0
+                            &CLUSTER_RET_0 &CLUSTER_NONRET_0>;
+
+            cpu_intc0: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x1>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_0_0 &CPU_NONRET_0_0
+                            &CLUSTER_RET_0 &CLUSTER_NONRET_0>;
+
+            cpu_intc1: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@10 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x10>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_1_0 &CPU_NONRET_1_0
+                            &CLUSTER_RET_1 &CLUSTER_NONRET_1>;
+
+            cpu_intc10: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@11 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x11>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_1_0 &CPU_NONRET_1_0
+                            &CLUSTER_RET_1 &CLUSTER_NONRET_1>;
+
+            cpu_intc11: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        idle-states {
+            CPU_RET_0_0: cpu-retentive-0-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x10000000>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CPU_NONRET_0_0: cpu-nonretentive-0-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x90000000>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_RET_0: cluster-retentive-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x11000000>;
+                local-timer-stop;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CLUSTER_NONRET_0: cluster-nonretentive-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x91000000>;
+                local-timer-stop;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+
+            CPU_RET_1_0: cpu-retentive-1-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x10000010>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CPU_NONRET_1_0: cpu-nonretentive-1-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x90000010>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_RET_1: cluster-retentive-1 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x11000010>;
+                local-timer-stop;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CLUSTER_NONRET_1: cluster-nonretentive-1 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x91000010>;
+                local-timer-stop;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml
new file mode 100644 (file)
index 0000000..54d68fc
--- /dev/null
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/mediatek,uart-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek UART APDMA controller
+
+maintainers:
+  - Long Cheng <long.cheng@mediatek.com>
+
+description: |
+  The MediaTek UART APDMA controller provides DMA capabilities
+  for the UART peripheral bus.
+
+allOf:
+  - $ref: "dma-controller.yaml#"
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - mediatek,mt2712-uart-dma
+              - mediatek,mt8516-uart-dma
+          - const: mediatek,mt6577-uart-dma
+      - enum:
+          - mediatek,mt6577-uart-dma
+
+  reg:
+    minItems: 1
+    maxItems: 16
+
+  interrupts:
+    description: |
+      TX, RX interrupt lines for each UART APDMA channel
+    minItems: 1
+    maxItems: 16
+
+  clocks:
+    description: Must contain one entry for the APDMA main clock
+    maxItems: 1
+
+  clock-names:
+    const: apdma
+
+  "#dma-cells":
+    const: 1
+    description: |
+      The first cell specifies the UART APDMA channel number
+
+  dma-requests:
+    description: |
+      Number of virtual channels of the UART APDMA controller
+    maximum: 16
+
+  mediatek,dma-33bits:
+    type: boolean
+    description: Enable 33-bits UART APDMA support
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+if:
+  not:
+    required:
+      - dma-requests
+then:
+  properties:
+    interrupts:
+      maxItems: 8
+    reg:
+      maxItems: 8
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/mt2712-clk.h>
+    soc {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        apdma: dma-controller@11000400 {
+            compatible = "mediatek,mt2712-uart-dma",
+                         "mediatek,mt6577-uart-dma";
+            reg = <0 0x11000400 0 0x80>,
+                  <0 0x11000480 0 0x80>,
+                  <0 0x11000500 0 0x80>,
+                  <0 0x11000580 0 0x80>,
+                  <0 0x11000600 0 0x80>,
+                  <0 0x11000680 0 0x80>,
+                  <0 0x11000700 0 0x80>,
+                  <0 0x11000780 0 0x80>,
+                  <0 0x11000800 0 0x80>,
+                  <0 0x11000880 0 0x80>,
+                  <0 0x11000900 0 0x80>,
+                  <0 0x11000980 0 0x80>;
+            interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
+                         <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
+            dma-requests = <12>;
+            clocks = <&pericfg CLK_PERI_AP_DMA>;
+            clock-names = "apdma";
+            mediatek,dma-33bits;
+            #dma-cells = <1>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt
deleted file mode 100644 (file)
index fef9c1e..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-* Mediatek UART APDMA Controller
-
-Required properties:
-- compatible should contain:
-  * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA
-  * "mediatek,mt6577-uart-dma" for MT6577 and all of the above
-  * "mediatek,mt8516-uart-dma", "mediatek,mt6577" for MT8516 SoC
-
-- reg: The base address of the APDMA register bank.
-
-- interrupts: A single interrupt specifier.
- One interrupt per dma-requests, or 8 if no dma-requests property is present
-
-- dma-requests: The number of DMA channels
-
-- clocks : Must contain an entry for each entry in clock-names.
-  See ../clocks/clock-bindings.txt for details.
-- clock-names: The APDMA clock for register accesses
-
-- mediatek,dma-33bits: Present if the DMA requires support
-
-Examples:
-
-       apdma: dma-controller@11000400 {
-               compatible = "mediatek,mt2712-uart-dma",
-                            "mediatek,mt6577-uart-dma";
-               reg = <0 0x11000400 0 0x80>,
-                     <0 0x11000480 0 0x80>,
-                     <0 0x11000500 0 0x80>,
-                     <0 0x11000580 0 0x80>,
-                     <0 0x11000600 0 0x80>,
-                     <0 0x11000680 0 0x80>,
-                     <0 0x11000700 0 0x80>,
-                     <0 0x11000780 0 0x80>,
-                     <0 0x11000800 0 0x80>,
-                     <0 0x11000880 0 0x80>,
-                     <0 0x11000900 0 0x80>,
-                     <0 0x11000980 0 0x80>;
-               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
-               dma-requests = <12>;
-               clocks = <&pericfg CLK_PERI_AP_DMA>;
-               clock-names = "apdma";
-               mediatek,dma-33bits;
-               #dma-cells = <1>;
-       };
index 7a4f415..1e25c5b 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/dma/renesas,rz-dmac.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: Renesas RZ/G2L DMA Controller
+title: Renesas RZ/{G2L,G2UL,V2L} DMA Controller
 
 maintainers:
   - Biju Das <biju.das.jz@bp.renesas.com>
@@ -16,7 +16,9 @@ properties:
   compatible:
     items:
       - enum:
+          - renesas,r9a07g043-dmac # RZ/G2UL
           - renesas,r9a07g044-dmac # RZ/G2{L,LC}
+          - renesas,r9a07g054-dmac # RZ/V2L
       - const: renesas,rz-dmac
 
   reg:
index 427c587..939e31c 100644 (file)
@@ -79,7 +79,7 @@ examples:
         interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>, <14>, <15>, <16>,
                      <17>, <18>, <19>, <20>, <21>, <22>;
         reg = <0x10060000 0x1000>;
-        clocks = <&tlclk PRCI_CLK_TLCLK>;
+        clocks = <&tlclk FU540_PRCI_CLK_TLCLK>;
         gpio-controller;
         #gpio-cells = <2>;
         interrupt-controller;
index ae1b37d..0a955c7 100644 (file)
@@ -39,39 +39,8 @@ additionalProperties: false
 examples:
 
   - |
-    /* OMAP4 SoCs */
-    hwspinlock: spinlock@4a0f6000 {
+    spinlock@4a0f6000 {
         compatible = "ti,omap4-hwspinlock";
         reg = <0x4a0f6000 0x1000>;
         #hwlock-cells = <1>;
     };
-
-  - |
-    / {
-        /* K3 AM65x SoCs */
-        model = "Texas Instruments K3 AM654 SoC";
-        compatible = "ti,am654-evm", "ti,am654";
-        #address-cells = <2>;
-        #size-cells = <2>;
-
-        bus@100000 {
-            compatible = "simple-bus";
-            #address-cells = <2>;
-            #size-cells = <2>;
-            ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
-                     <0x00 0x30800000 0x00 0x30800000 0x00 0x0bc00000>; /* Main NavSS */
-
-            bus@30800000 {
-                compatible = "simple-mfd";
-                #address-cells = <2>;
-                #size-cells = <2>;
-                ranges = <0x00 0x30800000 0x00 0x30800000 0x00 0x0bc00000>;
-
-                spinlock@30e00000 {
-                    compatible = "ti,am654-hwspinlock";
-                    reg = <0x00 0x30e00000 0x00 0x1000>;
-                    #hwlock-cells = <1>;
-                };
-            };
-        };
-    };
diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
new file mode 100644 (file)
index 0000000..b177064
--- /dev/null
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/mediatek,mt6779-keypad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek's Keypad Controller device tree bindings
+
+maintainers:
+  - Fengping Yu <fengping.yu@mediatek.com>
+
+allOf:
+  - $ref: "/schemas/input/matrix-keymap.yaml#"
+
+description: |
+  Mediatek's Keypad controller is used to interface a SoC with a matrix-type
+  keypad device. The keypad controller supports multiple row and column lines.
+  A key can be placed at each intersection of a unique row and a unique column.
+  The keypad controller can sense a key-press and key-release and report the
+  event using an interrupt to the CPU.
+
+properties:
+  compatible:
+    oneOf:
+      - const: mediatek,mt6779-keypad
+      - items:
+          - enum:
+              - mediatek,mt6873-keypad
+          - const: mediatek,mt6779-keypad
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: kpd
+
+  wakeup-source:
+    description: use any event on keypad as wakeup event
+    type: boolean
+
+  debounce-delay-ms:
+    maximum: 256
+    default: 16
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/input/input.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    soc {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        keyboard@10010000 {
+          compatible = "mediatek,mt6779-keypad";
+          reg = <0 0x10010000 0 0x1000>;
+          interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_FALLING>;
+          clocks = <&clk26m>;
+          clock-names = "kpd";
+        };
+    };
index 535d928..9d00f2a 100644 (file)
@@ -9,7 +9,10 @@ For MT6397/MT6323 MFD bindings see:
 Documentation/devicetree/bindings/mfd/mt6397.txt
 
 Required properties:
-- compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
+- compatible: Should be one of:
+       - "mediatek,mt6397-keys"
+       - "mediatek,mt6323-keys"
+       - "mediatek,mt6358-keys"
 - linux,keycodes: See Documentation/devicetree/bindings/input/input.yaml
 
 Optional Properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
new file mode 100644 (file)
index 0000000..e3a2b87
--- /dev/null
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/imagis,ist3038c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagis IST30XXC family touchscreen controller bindings
+
+maintainers:
+  - Markuss Broks <markuss.broks@gmail.com>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  $nodename:
+    pattern: "^touchscreen@[0-9a-f]+$"
+
+  compatible:
+    enum:
+      - imagis,ist3038c
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  vdd-supply:
+    description: Power supply regulator for the chip
+
+  vddio-supply:
+    description: Power supply regulator for the I2C bus
+
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-fuzz-x: true
+  touchscreen-fuzz-y: true
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - touchscreen-size-x
+  - touchscreen-size-y
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      touchscreen@50 {
+        compatible = "imagis,ist3038c";
+        reg = <0x50>;
+        interrupt-parent = <&gpio>;
+        interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+        vdd-supply = <&ldo1_reg>;
+        vddio-supply = <&ldo2_reg>;
+        touchscreen-size-x = <720>;
+        touchscreen-size-y = <1280>;
+        touchscreen-fuzz-x = <10>;
+        touchscreen-fuzz-y = <10>;
+        touchscreen-inverted-x;
+        touchscreen-inverted-y;
+      };
+    };
+
+...
index d8b495f..afec0bd 100644 (file)
@@ -85,6 +85,10 @@ properties:
 
   ec-pwm:
     $ref: "/schemas/pwm/google,cros-ec-pwm.yaml#"
+    deprecated: true
+
+  pwm:
+    $ref: "/schemas/pwm/google,cros-ec-pwm.yaml#"
 
   keyboard-controller:
     $ref: "/schemas/input/google,cros-ec-keyb.yaml#"
index fcf5035..1f57468 100644 (file)
@@ -7,7 +7,9 @@ This device has following properties:
 
 Required properties:
 
-- compatible: Should be qcom,qcs404-ethqos"
+- compatible: Should be one of:
+               "qcom,qcs404-ethqos"
+               "qcom,sm8150-ethqos"
 
 - reg: Address and length of the register set for the device
 
index 392f0ab..195e6af 100644 (file)
@@ -104,7 +104,7 @@ examples:
                             <0x0 0x0 0x0 0x2 &plic0 58>,
                             <0x0 0x0 0x0 0x3 &plic0 59>,
                             <0x0 0x0 0x0 0x4 &plic0 60>;
-            clocks = <&prci PRCI_CLK_PCIE_AUX>;
+            clocks = <&prci FU740_PRCI_CLK_PCIE_AUX>;
             resets = <&prci 4>;
             pwren-gpios = <&gpio 5 0>;
             reset-gpios = <&gpio 8 0>;
index 800d511..e93e935 100644 (file)
@@ -52,33 +52,36 @@ properties:
   resets:
     maxItems: 1
 
-if:
-  properties:
-    compatible:
-      contains:
-        const: allwinner,sun50i-h6-pwm
-
-then:
-  properties:
-    clocks:
-      maxItems: 2
-
-    clock-names:
-      items:
-        - const: mod
-        - const: bus
-
-  required:
-    - clock-names
-    - resets
-
-else:
-  properties:
-    clocks:
-      maxItems: 1
+
+allOf:
+  - $ref: pwm.yaml#
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun50i-h6-pwm
+
+    then:
+      properties:
+        clocks:
+          maxItems: 2
+
+        clock-names:
+          items:
+            - const: mod
+            - const: bus
+
+      required:
+        - clock-names
+        - resets
+
+    else:
+      properties:
+        clocks:
+          maxItems: 1
 
 required:
-  - "#pwm-cells"
   - compatible
   - reg
   - clocks
index 4080e09..119de3d 100644 (file)
@@ -28,7 +28,6 @@ properties:
 required:
   - compatible
   - reg
-  - "#pwm-cells"
   - clocks
 
 additionalProperties: false
index 4cfbffd..7ab6912 100644 (file)
@@ -16,6 +16,9 @@ description: |
   An EC PWM node should be only found as a sub-node of the EC node (see
   Documentation/devicetree/bindings/mfd/google,cros-ec.yaml).
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   compatible:
     const: google,cros-ec-pwm
@@ -39,7 +42,7 @@ examples:
             compatible = "google,cros-ec-spi";
             reg = <0>;
 
-            cros_ec_pwm: ec-pwm {
+            cros_ec_pwm: pwm {
                 compatible = "google,cros-ec-pwm";
                 #pwm-cells = <1>;
             };
index 379d693..b3da4e6 100644 (file)
@@ -9,6 +9,9 @@ title: Freescale i.MX PWM controller
 maintainers:
   - Philipp Zabel <p.zabel@pengutronix.de>
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   "#pwm-cells":
     description: |
@@ -59,7 +62,6 @@ properties:
     maxItems: 1
 
 required:
-  - "#pwm-cells"
   - compatible
   - reg
   - clocks
index fe9ef42..8bef9df 100644 (file)
@@ -13,6 +13,9 @@ description: |
   The TPM counter and period counter are shared between multiple
   channels, so all channels should use same period setting.
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   "#pwm-cells":
     const: 3
@@ -34,7 +37,6 @@ properties:
     maxItems: 1
 
 required:
-  - "#pwm-cells"
   - compatible
   - reg
   - clocks
index 11a6065..59d7c4d 100644 (file)
@@ -9,6 +9,9 @@ title: LGM SoC PWM fan controller
 maintainers:
   - Rahul Tanwar <rtanwar@maxlinear.com>
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   compatible:
     const: intel,lgm-pwm
index 1d7c27b..0a46af2 100644 (file)
@@ -15,6 +15,9 @@ description: |
   Documentation/devicetree/bindings/mfd/iqs62x.yaml for further details as
   well as an example.
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   compatible:
     enum:
@@ -25,7 +28,6 @@ properties:
 
 required:
   - compatible
-  - "#pwm-cells"
 
 additionalProperties: false
 
index 8740e07..a34cbc1 100644 (file)
@@ -10,6 +10,9 @@ maintainers:
   - Shawn Guo <shawnguo@kernel.org>
   - Anson Huang <anson.huang@nxp.com>
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   compatible:
     enum:
@@ -28,7 +31,6 @@ properties:
 required:
   - compatible
   - reg
-  - "#pwm-cells"
   - fsl,pwm-number
 
 additionalProperties: false
index 902b271..691e58b 100644 (file)
@@ -6,6 +6,7 @@ Required properties:
    - "mediatek,mt6595-disp-pwm": found on mt6595 SoC.
    - "mediatek,mt8167-disp-pwm", "mediatek,mt8173-disp-pwm": found on mt8167 SoC.
    - "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
+   - "mediatek,mt8183-disp-pwm": found on mt8183 SoC.
  - reg: physical base address and length of the controller's registers.
  - #pwm-cells: must be 2. See pwm.yaml in this directory for a description of
    the cell format.
index 81a54a4..a336ff9 100644 (file)
@@ -51,42 +51,44 @@ properties:
 required:
   - compatible
   - reg
-  - "#pwm-cells"
-
-if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - rockchip,rk3328-pwm
-          - rockchip,rv1108-pwm
-
-then:
-  properties:
-    clocks:
-      items:
-        - description: Used to derive the functional clock for the device.
-        - description: Used as the APB bus clock.
-
-    clock-names:
-      items:
-        - const: pwm
-        - const: pclk
-
-  required:
-    - clocks
-    - clock-names
-
-else:
-  properties:
-    clocks:
-      maxItems: 1
-      description:
-        Used both to derive the functional clock
-        for the device and as the bus clock.
-
-  required:
-    - clocks
+
+allOf:
+  - $ref: pwm.yaml#
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - rockchip,rk3328-pwm
+              - rockchip,rv1108-pwm
+
+    then:
+      properties:
+        clocks:
+          items:
+            - description: Used to derive the functional clock for the device.
+            - description: Used as the APB bus clock.
+
+        clock-names:
+          items:
+            - const: pwm
+            - const: pclk
+
+      required:
+        - clocks
+        - clock-names
+
+    else:
+      properties:
+        clocks:
+          maxItems: 1
+          description:
+            Used both to derive the functional clock
+            for the device and as the bus clock.
+
+      required:
+        - clocks
 
 additionalProperties: false
 
index 188679c..fe603fb 100644 (file)
@@ -86,7 +86,6 @@ required:
   - clocks
   - clock-names
   - compatible
-  - "#pwm-cells"
   - reg
 
 additionalProperties: false
index db41cd7..605c176 100644 (file)
@@ -21,6 +21,9 @@ description:
 
   https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/pwm
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   compatible:
     items:
@@ -54,7 +57,6 @@ required:
   - compatible
   - reg
   - clocks
-  - "#pwm-cells"
   - interrupts
 
 additionalProperties: false
index ed35b6c..3840ae7 100644 (file)
@@ -47,7 +47,6 @@ properties:
 required:
   - compatible
   - reg
-  - "#pwm-cells"
   - clocks
   - clock-names
 
index ee312cb..70a8f76 100644 (file)
@@ -48,7 +48,6 @@ properties:
 required:
   - compatible
   - reg
-  - "#pwm-cells"
   - clocks
   - clock-names
 
index 7ea1070..1c94acb 100644 (file)
@@ -59,21 +59,23 @@ properties:
 required:
   - compatible
   - reg
-  - '#pwm-cells'
   - clocks
   - power-domains
 
-if:
-  not:
-    properties:
-      compatible:
-        contains:
-          enum:
-            - renesas,pwm-r8a7778
-            - renesas,pwm-r8a7779
-then:
-  required:
-    - resets
+allOf:
+  - $ref: pwm.yaml#
+
+  - if:
+      not:
+        properties:
+          compatible:
+            contains:
+              enum:
+                - renesas,pwm-r8a7778
+                - renesas,pwm-r8a7779
+    then:
+      required:
+        - resets
 
 additionalProperties: false
 
index 1f5c638..c6b2ab5 100644 (file)
@@ -68,7 +68,6 @@ properties:
 required:
   - compatible
   - reg
-  - '#pwm-cells'
   - clocks
   - power-domains
 
index d350f5e..4662266 100644 (file)
@@ -9,6 +9,9 @@ title: Toshiba Visconti PWM Controller
 maintainers:
   - Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
 
+allOf:
+  - $ref: pwm.yaml#
+
 properties:
   compatible:
     items:
@@ -23,7 +26,6 @@ properties:
 required:
   - compatible
   - reg
-  - '#pwm-cells'
 
 additionalProperties: false
 
index d21a25e..5b693a2 100644 (file)
@@ -17,6 +17,7 @@ properties:
   compatible:
     enum:
       - mediatek,mt8183-scp
+      - mediatek,mt8186-scp
       - mediatek,mt8192-scp
       - mediatek,mt8195-scp
 
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,hexagon-v56.txt b/Documentation/devicetree/bindings/remoteproc/qcom,hexagon-v56.txt
deleted file mode 100644 (file)
index 1337a3d..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-Qualcomm Technology Inc. Hexagon v56 Peripheral Image Loader
-
-This document defines the binding for a component that loads and boots firmware
-on the Qualcomm Technology Inc. Hexagon v56 core.
-
-- compatible:
-       Usage: required
-       Value type: <string>
-       Definition: must be one of:
-                   "qcom,qcs404-cdsp-pil",
-                   "qcom,sdm845-adsp-pil"
-
-- reg:
-       Usage: required
-       Value type: <prop-encoded-array>
-       Definition: must specify the base address and size of the qdsp6ss register
-
-- interrupts-extended:
-       Usage: required
-       Value type: <prop-encoded-array>
-       Definition: must list the watchdog, fatal IRQs ready, handover and
-                   stop-ack IRQs
-
-- interrupt-names:
-       Usage: required
-       Value type: <stringlist>
-       Definition: must be "wdog", "fatal", "ready", "handover", "stop-ack"
-
-- clocks:
-       Usage: required
-       Value type: <prop-encoded-array>
-       Definition:  List of phandles and clock specifier pairs for the Hexagon,
-                    per clock-names below.
-
-- clock-names:
-       Usage: required for SDM845 ADSP
-       Value type: <stringlist>
-       Definition: List of clock input name strings sorted in the same
-                   order as the clocks property. Definition must have
-                   "xo", "sway_cbcr", "lpass_ahbs_aon_cbcr",
-                   "lpass_ahbm_aon_cbcr", "qdsp6ss_xo", "qdsp6ss_sleep"
-                   and "qdsp6ss_core".
-
-- clock-names:
-       Usage: required for QCS404 CDSP
-       Value type: <stringlist>
-       Definition: List of clock input name strings sorted in the same
-                   order as the clocks property. Definition must have
-                   "xo", "sway", "tbu", "bimc", "ahb_aon", "q6ss_slave",
-                   "q6ss_master", "q6_axim".
-
-- power-domains:
-       Usage: required
-       Value type: <phandle>
-       Definition: reference to cx power domain node.
-
-- resets:
-       Usage: required
-       Value type: <phandle>
-       Definition: reference to the list of resets for the Hexagon.
-
-- reset-names:
-        Usage: required for SDM845 ADSP
-        Value type: <stringlist>
-        Definition: must be "pdc_sync" and "cc_lpass"
-
-- reset-names:
-        Usage: required for QCS404 CDSP
-        Value type: <stringlist>
-        Definition: must be "restart"
-
-- qcom,halt-regs:
-       Usage: required
-       Value type: <prop-encoded-array>
-       Definition: a phandle reference to a syscon representing TCSR followed
-                   by the offset within syscon for Hexagon halt register.
-
-- memory-region:
-       Usage: required
-       Value type: <phandle>
-       Definition: reference to the reserved-memory for the firmware
-
-- qcom,smem-states:
-       Usage: required
-       Value type: <phandle>
-       Definition: reference to the smem state for requesting the Hexagon to
-                   shut down
-
-- qcom,smem-state-names:
-       Usage: required
-       Value type: <stringlist>
-       Definition: must be "stop"
-
-
-= SUBNODES
-The adsp node may have an subnode named "glink-edge" that describes the
-communication edge, channels and devices related to the Hexagon.
-See ../soc/qcom/qcom,glink.txt for details on how to describe these.
-
-= EXAMPLE
-The following example describes the resources needed to boot control the
-ADSP, as it is found on SDM845 boards.
-
-       remoteproc@17300000 {
-               compatible = "qcom,sdm845-adsp-pil";
-               reg = <0x17300000 0x40c>;
-
-               interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
-                       <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
-                       <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
-                       <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
-                       <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
-               interrupt-names = "wdog", "fatal", "ready",
-                       "handover", "stop-ack";
-
-               clocks = <&rpmhcc RPMH_CXO_CLK>,
-                       <&gcc GCC_LPASS_SWAY_CLK>,
-                       <&lpasscc LPASS_Q6SS_AHBS_AON_CLK>,
-                       <&lpasscc LPASS_Q6SS_AHBM_AON_CLK>,
-                       <&lpasscc LPASS_QDSP6SS_XO_CLK>,
-                       <&lpasscc LPASS_QDSP6SS_SLEEP_CLK>,
-                       <&lpasscc LPASS_QDSP6SS_CORE_CLK>;
-               clock-names = "xo", "sway_cbcr",
-                       "lpass_ahbs_aon_cbcr",
-                       "lpass_ahbm_aon_cbcr", "qdsp6ss_xo",
-                       "qdsp6ss_sleep", "qdsp6ss_core";
-
-               power-domains = <&rpmhpd SDM845_CX>;
-
-               resets = <&pdc_reset PDC_AUDIO_SYNC_RESET>,
-                        <&aoss_reset AOSS_CC_LPASS_RESTART>;
-               reset-names = "pdc_sync", "cc_lpass";
-
-               qcom,halt-regs = <&tcsr_mutex_regs 0x22000>;
-
-               memory-region = <&pil_adsp_mem>;
-
-               qcom,smem-states = <&adsp_smp2p_out 0>;
-               qcom,smem-state-names = "stop";
-       };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml
new file mode 100644 (file)
index 0000000..31413cf
--- /dev/null
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,qcs404-cdsp-pil.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QCS404 CDSP Peripheral Image Loader
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+  This document defines the binding for a component that loads and boots firmware
+  on the Qualcomm Technology Inc. CDSP (Compute DSP).
+
+properties:
+  compatible:
+    enum:
+      - qcom,qcs404-cdsp-pil
+
+  reg:
+    maxItems: 1
+    description:
+      The base address and size of the qdsp6ss register
+
+  interrupts:
+    items:
+      - description: Watchdog interrupt
+      - description: Fatal interrupt
+      - description: Ready interrupt
+      - description: Handover interrupt
+      - description: Stop acknowledge interrupt
+
+  interrupt-names:
+    items:
+      - const: wdog
+      - const: fatal
+      - const: ready
+      - const: handover
+      - const: stop-ack
+
+  clocks:
+    items:
+      - description: XO clock
+      - description: SWAY clock
+      - description: TBU clock
+      - description: BIMC clock
+      - description: AHB AON clock
+      - description: Q6SS SLAVE clock
+      - description: Q6SS MASTER clock
+      - description: Q6 AXIM clock
+
+  clock-names:
+    items:
+      - const: xo
+      - const: sway
+      - const: tbu
+      - const: bimc
+      - const: ahb_aon
+      - const: q6ss_slave
+      - const: q6ss_master
+      - const: q6_axim
+
+  power-domains:
+    items:
+      - description: CX power domain
+
+  resets:
+    items:
+      - description: AOSS restart
+
+  reset-names:
+    items:
+      - const: restart
+
+  memory-region:
+    maxItems: 1
+    description: Reference to the reserved-memory for the Hexagon core
+
+  qcom,halt-regs:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      Phandle reference to a syscon representing TCSR followed by the
+      three offsets within syscon for q6, modem and nc halt registers.
+
+  qcom,smem-states:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description: States used by the AP to signal the Hexagon core
+    items:
+      - description: Stop the modem
+
+  qcom,smem-state-names:
+    $ref: /schemas/types.yaml#/definitions/string
+    description: The names of the state bits used for SMP2P output
+    items:
+      - const: stop
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-names
+  - clocks
+  - clock-names
+  - power-domains
+  - resets
+  - reset-names
+  - qcom,halt-regs
+  - memory-region
+  - qcom,smem-states
+  - qcom,smem-state-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/qcom,gcc-qcs404.h>
+    #include <dt-bindings/power/qcom-rpmpd.h>
+    #include <dt-bindings/clock/qcom,turingcc-qcs404.h>
+    remoteproc@b00000 {
+        compatible = "qcom,qcs404-cdsp-pil";
+        reg = <0x00b00000 0x4040>;
+
+        interrupts-extended = <&intc GIC_SPI 229 IRQ_TYPE_EDGE_RISING>,
+                              <&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+                              <&cdsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+                              <&cdsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+                              <&cdsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+        interrupt-names = "wdog", "fatal", "ready",
+                          "handover", "stop-ack";
+
+        clocks = <&xo_board>,
+                 <&gcc GCC_CDSP_CFG_AHB_CLK>,
+                 <&gcc GCC_CDSP_TBU_CLK>,
+                 <&gcc GCC_BIMC_CDSP_CLK>,
+                 <&turingcc TURING_WRAPPER_AON_CLK>,
+                 <&turingcc TURING_Q6SS_AHBS_AON_CLK>,
+                 <&turingcc TURING_Q6SS_AHBM_AON_CLK>,
+                 <&turingcc TURING_Q6SS_Q6_AXIM_CLK>;
+        clock-names = "xo",
+                      "sway",
+                      "tbu",
+                      "bimc",
+                      "ahb_aon",
+                      "q6ss_slave",
+                      "q6ss_master",
+                      "q6_axim";
+
+        power-domains = <&rpmhpd SDM845_CX>;
+
+        resets = <&gcc GCC_CDSP_RESTART>;
+        reset-names = "restart";
+
+        qcom,halt-regs = <&tcsr 0x19004>;
+
+        memory-region = <&cdsp_fw_mem>;
+
+        qcom,smem-states = <&cdsp_smp2p_out 0>;
+        qcom,smem-state-names = "stop";
+    };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
new file mode 100644 (file)
index 0000000..2424de7
--- /dev/null
@@ -0,0 +1,219 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,sc7280-wpss-pil.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SC7280 WPSS Peripheral Image Loader
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+  This document defines the binding for a component that loads and boots firmware
+  on the Qualcomm Technology Inc. WPSS.
+
+properties:
+  compatible:
+    enum:
+      - qcom,sc7280-wpss-pil
+
+  reg:
+    maxItems: 1
+    description:
+      The base address and size of the qdsp6ss register
+
+  interrupts:
+    items:
+      - description: Watchdog interrupt
+      - description: Fatal interrupt
+      - description: Ready interrupt
+      - description: Handover interrupt
+      - description: Stop acknowledge interrupt
+      - description: Shutdown acknowledge interrupt
+
+  interrupt-names:
+    items:
+      - const: wdog
+      - const: fatal
+      - const: ready
+      - const: handover
+      - const: stop-ack
+      - const: shutdown-ack
+
+  clocks:
+    items:
+      - description: GCC WPSS AHB BDG Master clock
+      - description: GCC WPSS AHB clock
+      - description: GCC WPSS RSCP clock
+      - description: XO clock
+
+  clock-names:
+    items:
+      - const: ahb_bdg
+      - const: ahb
+      - const: rscp
+      - const: xo
+
+  power-domains:
+    items:
+      - description: CX power domain
+      - description: MX power domain
+
+  power-domain-names:
+    items:
+      - const: cx
+      - const: mx
+
+  resets:
+    items:
+      - description: AOSS restart
+      - description: PDC SYNC
+
+  reset-names:
+    items:
+      - const: restart
+      - const: pdc_sync
+
+  memory-region:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Reference to the reserved-memory for the Hexagon core
+
+  firmware-name:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      The name of the firmware which should be loaded for this remote
+      processor.
+
+  qcom,halt-regs:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      Phandle reference to a syscon representing TCSR followed by the
+      three offsets within syscon for q6, modem and nc halt registers.
+
+  qcom,qmp:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Reference to the AOSS side-channel message RAM.
+
+  qcom,smem-states:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description: States used by the AP to signal the Hexagon core
+    items:
+      - description: Stop the modem
+
+  qcom,smem-state-names:
+    $ref: /schemas/types.yaml#/definitions/string
+    description: The names of the state bits used for SMP2P output
+    items:
+      - const: stop
+
+  glink-edge:
+    type: object
+    description: |
+      Qualcomm G-Link subnode which represents communication edge, channels
+      and devices related to the ADSP.
+
+    properties:
+      interrupts:
+        items:
+          - description: IRQ from WPSS to GLINK
+
+      mboxes:
+        items:
+          - description: Mailbox for communication between APPS and WPSS
+
+      label:
+        description: The names of the state bits used for SMP2P output
+        items:
+          - const: wpss
+
+      qcom,remote-pid:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: ID of the shared memory used by GLINK for communication with WPSS
+        maxItems: 1
+
+    required:
+      - interrupts
+      - mboxes
+      - label
+      - qcom,remote-pid
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-names
+  - clocks
+  - clock-names
+  - power-domains
+  - power-domain-names
+  - resets
+  - reset-names
+  - qcom,halt-regs
+  - memory-region
+  - qcom,qmp
+  - qcom,smem-states
+  - qcom,smem-state-names
+  - glink-edge
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/qcom,gcc-sc7280.h>
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    #include <dt-bindings/power/qcom-rpmpd.h>
+    #include <dt-bindings/reset/qcom,sdm845-aoss.h>
+    #include <dt-bindings/reset/qcom,sdm845-pdc.h>
+    #include <dt-bindings/mailbox/qcom-ipcc.h>
+    remoteproc@8a00000 {
+        compatible = "qcom,sc7280-wpss-pil";
+        reg = <0x08a00000 0x10000>;
+
+        interrupts-extended = <&intc GIC_SPI 587 IRQ_TYPE_EDGE_RISING>,
+                              <&wpss_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+                              <&wpss_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+                              <&wpss_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+                              <&wpss_smp2p_in 3 IRQ_TYPE_EDGE_RISING>,
+                              <&wpss_smp2p_in 7 IRQ_TYPE_EDGE_RISING>;
+        interrupt-names = "wdog", "fatal", "ready", "handover",
+                          "stop-ack", "shutdown-ack";
+
+        clocks = <&gcc GCC_WPSS_AHB_BDG_MST_CLK>,
+                 <&gcc GCC_WPSS_AHB_CLK>,
+                 <&gcc GCC_WPSS_RSCP_CLK>,
+                 <&rpmhcc RPMH_CXO_CLK>;
+        clock-names = "ahb_bdg", "ahb",
+                      "rscp", "xo";
+
+        power-domains = <&rpmhpd SC7280_CX>,
+                        <&rpmhpd SC7280_MX>;
+        power-domain-names = "cx", "mx";
+
+        memory-region = <&wpss_mem>;
+
+        qcom,qmp = <&aoss_qmp>;
+
+        qcom,smem-states = <&wpss_smp2p_out 0>;
+        qcom,smem-state-names = "stop";
+
+        resets = <&aoss_reset AOSS_CC_WCSS_RESTART>,
+                 <&pdc_reset PDC_WPSS_SYNC_RESET>;
+        reset-names = "restart", "pdc_sync";
+
+        qcom,halt-regs = <&tcsr_mutex 0x37000>;
+
+        glink-edge {
+            interrupts-extended = <&ipcc IPCC_CLIENT_WPSS
+                                         IPCC_MPROC_SIGNAL_GLINK_QMP
+                                         IRQ_TYPE_EDGE_RISING>;
+            mboxes = <&ipcc IPCC_CLIENT_WPSS
+                            IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+            label = "wpss";
+            qcom,remote-pid = <13>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml
new file mode 100644 (file)
index 0000000..1535bbb
--- /dev/null
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,sdm845-adsp-pil.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SDM845 ADSP Peripheral Image Loader
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+  This document defines the binding for a component that loads and boots firmware
+  on the Qualcomm Technology Inc. ADSP.
+
+properties:
+  compatible:
+    enum:
+      - qcom,sdm845-adsp-pil
+
+  reg:
+    maxItems: 1
+    description:
+      The base address and size of the qdsp6ss register
+
+  interrupts:
+    items:
+      - description: Watchdog interrupt
+      - description: Fatal interrupt
+      - description: Ready interrupt
+      - description: Handover interrupt
+      - description: Stop acknowledge interrupt
+
+  interrupt-names:
+    items:
+      - const: wdog
+      - const: fatal
+      - const: ready
+      - const: handover
+      - const: stop-ack
+
+  clocks:
+    items:
+      - description: XO clock
+      - description: SWAY clock
+      - description: LPASS AHBS AON clock
+      - description: LPASS AHBM AON clock
+      - description: QDSP XO clock
+      - description: Q6SP6SS SLEEP clock
+      - description: Q6SP6SS CORE clock
+
+  clock-names:
+    items:
+      - const: xo
+      - const: sway_cbcr
+      - const: lpass_ahbs_aon_cbcr
+      - const: lpass_ahbm_aon_cbcr
+      - const: qdsp6ss_xo
+      - const: qdsp6ss_sleep
+      - const: qdsp6ss_core
+
+  power-domains:
+    items:
+      - description: CX power domain
+
+  resets:
+    items:
+      - description: PDC AUDIO SYNC RESET
+      - description: CC LPASS restart
+
+  reset-names:
+    items:
+      - const: pdc_sync
+      - const: cc_lpass
+
+  memory-region:
+    maxItems: 1
+    description: Reference to the reserved-memory for the Hexagon core
+
+  qcom,halt-regs:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      Phandle reference to a syscon representing TCSR followed by the
+      three offsets within syscon for q6, modem and nc halt registers.
+
+  qcom,smem-states:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description: States used by the AP to signal the Hexagon core
+    items:
+      - description: Stop the modem
+
+  qcom,smem-state-names:
+    $ref: /schemas/types.yaml#/definitions/string
+    description: The names of the state bits used for SMP2P output
+    items:
+      - const: stop
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-names
+  - clocks
+  - clock-names
+  - power-domains
+  - resets
+  - reset-names
+  - qcom,halt-regs
+  - memory-region
+  - qcom,smem-states
+  - qcom,smem-state-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+    #include <dt-bindings/clock/qcom,lpass-sdm845.h>
+    #include <dt-bindings/power/qcom-rpmpd.h>
+    #include <dt-bindings/reset/qcom,sdm845-pdc.h>
+    #include <dt-bindings/reset/qcom,sdm845-aoss.h>
+    remoteproc@17300000 {
+        compatible = "qcom,sdm845-adsp-pil";
+        reg = <0x17300000 0x40c>;
+
+        interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+                <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+                <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+                <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+                <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+        interrupt-names = "wdog", "fatal", "ready",
+                "handover", "stop-ack";
+
+        clocks = <&rpmhcc RPMH_CXO_CLK>,
+                 <&gcc GCC_LPASS_SWAY_CLK>,
+                 <&lpasscc LPASS_Q6SS_AHBS_AON_CLK>,
+                 <&lpasscc LPASS_Q6SS_AHBM_AON_CLK>,
+                 <&lpasscc LPASS_QDSP6SS_XO_CLK>,
+                 <&lpasscc LPASS_QDSP6SS_SLEEP_CLK>,
+                 <&lpasscc LPASS_QDSP6SS_CORE_CLK>;
+        clock-names = "xo", "sway_cbcr",
+                "lpass_ahbs_aon_cbcr",
+                "lpass_ahbm_aon_cbcr", "qdsp6ss_xo",
+                "qdsp6ss_sleep", "qdsp6ss_core";
+
+        power-domains = <&rpmhpd SDM845_CX>;
+
+        resets = <&pdc_reset PDC_AUDIO_SYNC_RESET>,
+                 <&aoss_reset AOSS_CC_LPASS_RESTART>;
+        reset-names = "pdc_sync", "cc_lpass";
+
+        qcom,halt-regs = <&tcsr_mutex_regs 0x22000>;
+
+        memory-region = <&pil_adsp_mem>;
+
+        qcom,smem-states = <&adsp_smp2p_out 0>;
+        qcom,smem-state-names = "stop";
+    };
index aa5fb64..f62f646 100644 (file)
@@ -99,6 +99,12 @@ properties:
       - compatible
       - interrupt-controller
 
+  cpu-idle-states:
+    $ref: '/schemas/types.yaml#/definitions/phandle-array'
+    description: |
+      List of phandles to idle state nodes supported
+      by this hart (see ./idle-states.yaml).
+
 required:
   - riscv,isa
   - interrupt-controller
index beeb90e..0b767fe 100644 (file)
@@ -16,16 +16,22 @@ properties:
 
   compatible:
     oneOf:
-      - const: allwinner,sun6i-a31-rtc
-      - const: allwinner,sun8i-a23-rtc
-      - const: allwinner,sun8i-h3-rtc
-      - const: allwinner,sun8i-r40-rtc
-      - const: allwinner,sun8i-v3-rtc
-      - const: allwinner,sun50i-h5-rtc
+      - enum:
+          - allwinner,sun6i-a31-rtc
+          - allwinner,sun8i-a23-rtc
+          - allwinner,sun8i-h3-rtc
+          - allwinner,sun8i-r40-rtc
+          - allwinner,sun8i-v3-rtc
+          - allwinner,sun50i-h5-rtc
+          - allwinner,sun50i-h6-rtc
+          - allwinner,sun50i-h616-rtc
+          - allwinner,sun50i-r329-rtc
       - items:
           - const: allwinner,sun50i-a64-rtc
           - const: allwinner,sun8i-h3-rtc
-      - const: allwinner,sun50i-h6-rtc
+      - items:
+          - const: allwinner,sun20i-d1-rtc
+          - const: allwinner,sun50i-r329-rtc
 
   reg:
     maxItems: 1
@@ -37,7 +43,12 @@ properties:
       - description: RTC Alarm 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    maxItems: 4
+
+  clock-names:
+    minItems: 1
+    maxItems: 4
 
   clock-output-names:
     minItems: 1
@@ -85,6 +96,7 @@ allOf:
             enum:
               - allwinner,sun8i-h3-rtc
               - allwinner,sun50i-h5-rtc
+              - allwinner,sun50i-h6-rtc
 
     then:
       properties:
@@ -96,19 +108,68 @@ allOf:
       properties:
         compatible:
           contains:
-            const: allwinner,sun50i-h6-rtc
+            const: allwinner,sun50i-h616-rtc
 
     then:
       properties:
-        clock-output-names:
+        clocks:
           minItems: 3
           maxItems: 3
+          items:
+            - description: Bus clock for register access
+            - description: 24 MHz oscillator
+            - description: 32 kHz clock from the CCU
+
+        clock-names:
+          minItems: 3
+          maxItems: 3
+          items:
+            - const: bus
+            - const: hosc
+            - const: pll-32k
+
+      required:
+        - clocks
+        - clock-names
 
   - if:
       properties:
         compatible:
           contains:
-            const: allwinner,sun8i-r40-rtc
+            const: allwinner,sun50i-r329-rtc
+
+    then:
+      properties:
+        clocks:
+          minItems: 3
+          maxItems: 4
+          items:
+            - description: Bus clock for register access
+            - description: 24 MHz oscillator
+            - description: AHB parent for internal SPI clock
+            - description: External 32768 Hz oscillator
+
+        clock-names:
+          minItems: 3
+          maxItems: 4
+          items:
+            - const: bus
+            - const: hosc
+            - const: ahb
+            - const: ext-osc32k
+
+      required:
+        - clocks
+        - clock-names
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun8i-r40-rtc
+              - allwinner,sun50i-h616-rtc
+              - allwinner,sun50i-r329-rtc
 
     then:
       properties:
@@ -127,7 +188,6 @@ required:
   - compatible
   - reg
   - interrupts
-  - clock-output-names
 
 additionalProperties: false
 
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt
deleted file mode 100644 (file)
index 3f0e2a5..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-Atmel AT91SAM9260 Real Time Timer
-
-Required properties:
-- compatible: should be one of the following:
-       - "atmel,at91sam9260-rtt"
-       - "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"
-- reg: should encode the memory region of the RTT controller
-- interrupts: rtt alarm/event interrupt
-- clocks: should contain the 32 KHz slow clk that will drive the RTT block.
-- atmel,rtt-rtc-time-reg: should encode the GPBR register used to store
-       the time base when the RTT is used as an RTC.
-       The first cell should point to the GPBR node and the second one
-       encode the offset within the GPBR block (or in other words, the
-       GPBR register used to store the time base).
-
-
-Example:
-
-rtt@fffffd20 {
-       compatible = "atmel,at91sam9260-rtt";
-       reg = <0xfffffd20 0x10>;
-       interrupts = <1 4 7>;
-       clocks = <&clk32k>;
-       atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
new file mode 100644 (file)
index 0000000..0ef1b7f
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/atmel,at91sam9260-rtt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel AT91 RTT Device Tree Bindings
+
+allOf:
+  - $ref: "rtc.yaml#"
+
+maintainers:
+  - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - const: atmel,at91sam9260-rtt
+      - items:
+          - const: microchip,sam9x60-rtt
+          - const: atmel,at91sam9260-rtt
+      - items:
+          - const: microchip,sama7g5-rtt
+          - const: microchip,sam9x60-rtt
+          - const: atmel,at91sam9260-rtt
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  atmel,rtt-rtc-time-reg:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    items:
+      - items:
+          - description: Phandle to the GPBR node.
+          - description: Offset within the GPBR block.
+    description:
+      Should encode the GPBR register used to store the time base when the
+      RTT is used as an RTC. The first cell should point to the GPBR node
+      and the second one encodes the offset within the GPBR block (or in
+      other words, the GPBR register used to store the time base).
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - atmel,rtt-rtc-time-reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    rtc@fffffd20 {
+        compatible = "atmel,at91sam9260-rtt";
+        reg = <0xfffffd20 0x10>;
+        interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+        clocks = <&clk32k>;
+        atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+    };
index 09aae43..b0a8871 100644 (file)
@@ -59,7 +59,7 @@ examples:
         interrupt-parent = <&plic0>;
         interrupts = <80>;
         reg = <0x10010000 0x1000>;
-        clocks = <&prci PRCI_CLK_TLCLK>;
+        clocks = <&prci FU540_PRCI_CLK_TLCLK>;
       };
 
 ...
index 7fb37ea..d541cf2 100644 (file)
@@ -152,6 +152,7 @@ patternProperties:
           - enum:
               - ingenic,jz4740-pwm
               - ingenic,jz4725b-pwm
+              - ingenic,x1000-pwm
           - items:
               - enum:
                   - ingenic,jz4760-pwm
index 8fe2d93..0143097 100644 (file)
@@ -560,6 +560,8 @@ patternProperties:
     description: Ingenieurburo Fur Ic-Technologie (I/F/I)
   "^ilitek,.*":
     description: ILI Technology Corporation (ILITEK)
+  "^imagis,.*":
+    description: Imagis Technologies Co., Ltd.
   "^img,.*":
     description: Imagination Technologies Ltd.
   "^imi,.*":
index 91a98cc..d060438 100644 (file)
@@ -55,6 +55,11 @@ properties:
               - renesas,r8a779a0-wdt     # R-Car V3U
           - const: renesas,rcar-gen3-wdt # R-Car Gen3 and RZ/G2
 
+      - items:
+          - enum:
+              - renesas,r8a779f0-wdt     # R-Car S4-8
+          - const: renesas,rcar-gen4-wdt # R-Car Gen4
+
   reg:
     maxItems: 1
 
index 1d8302b..be8587a 100644 (file)
@@ -14,10 +14,8 @@ Version 13
        Overview
            Supporting Documents
            Git Trees
-       LIBNVDIMM PMEM and BLK
-       Why BLK?
-           PMEM vs BLK
-               BLK-REGIONs, PMEM-REGIONs, Atomic Sectors, and DAX
+       LIBNVDIMM PMEM
+           PMEM-REGIONs, Atomic Sectors, and DAX
        Example NVDIMM Platform
        LIBNVDIMM Kernel Device Model and LIBNDCTL Userspace API
            LIBNDCTL: Context
@@ -53,19 +51,12 @@ PMEM:
   block device composed of PMEM is capable of DAX.  A PMEM address range
   may span an interleave of several DIMMs.
 
-BLK:
-  A set of one or more programmable memory mapped apertures provided
-  by a DIMM to access its media.  This indirection precludes the
-  performance benefit of interleaving, but enables DIMM-bounded failure
-  modes.
-
 DPA:
   DIMM Physical Address, is a DIMM-relative offset.  With one DIMM in
   the system there would be a 1:1 system-physical-address:DPA association.
   Once more DIMMs are added a memory controller interleave must be
   decoded to determine the DPA associated with a given
-  system-physical-address.  BLK capacity always has a 1:1 relationship
-  with a single-DIMM's DPA range.
+  system-physical-address.
 
 DAX:
   File system extensions to bypass the page cache and block layer to
@@ -84,30 +75,30 @@ BTT:
   Block Translation Table: Persistent memory is byte addressable.
   Existing software may have an expectation that the power-fail-atomicity
   of writes is at least one sector, 512 bytes.  The BTT is an indirection
-  table with atomic update semantics to front a PMEM/BLK block device
+  table with atomic update semantics to front a PMEM block device
   driver and present arbitrary atomic sector sizes.
 
 LABEL:
   Metadata stored on a DIMM device that partitions and identifies
-  (persistently names) storage between PMEM and BLK.  It also partitions
-  BLK storage to host BTTs with different parameters per BLK-partition.
-  Note that traditional partition tables, GPT/MBR, are layered on top of a
-  BLK or PMEM device.
+  (persistently names) capacity allocated to different PMEM namespaces. It
+  also indicates whether an address abstraction like a BTT is applied to
+  the namepsace.  Note that traditional partition tables, GPT/MBR, are
+  layered on top of a PMEM namespace, or an address abstraction like BTT
+  if present, but partition support is deprecated going forward.
 
 
 Overview
 ========
 
-The LIBNVDIMM subsystem provides support for three types of NVDIMMs, namely,
-PMEM, BLK, and NVDIMM devices that can simultaneously support both PMEM
-and BLK mode access.  These three modes of operation are described by
-the "NVDIMM Firmware Interface Table" (NFIT) in ACPI 6.  While the LIBNVDIMM
-implementation is generic and supports pre-NFIT platforms, it was guided
-by the superset of capabilities need to support this ACPI 6 definition
-for NVDIMM resources.  The bulk of the kernel implementation is in place
-to handle the case where DPA accessible via PMEM is aliased with DPA
-accessible via BLK.  When that occurs a LABEL is needed to reserve DPA
-for exclusive access via one mode a time.
+The LIBNVDIMM subsystem provides support for PMEM described by platform
+firmware or a device driver. On ACPI based systems the platform firmware
+conveys persistent memory resource via the ACPI NFIT "NVDIMM Firmware
+Interface Table" in ACPI 6. While the LIBNVDIMM subsystem implementation
+is generic and supports pre-NFIT platforms, it was guided by the
+superset of capabilities needed to support this ACPI 6 definition for
+NVDIMM resources. The original implementation supported the
+block-window-aperture capability described in the NFIT, but that support
+has since been abandoned and never shipped in a product.
 
 Supporting Documents
 --------------------
@@ -125,107 +116,38 @@ Git Trees
 ---------
 
 LIBNVDIMM:
-       https://git.kernel.org/cgit/linux/kernel/git/djbw/nvdimm.git
+       https://git.kernel.org/cgit/linux/kernel/git/nvdimm/nvdimm.git
 LIBNDCTL:
        https://github.com/pmem/ndctl.git
-PMEM:
-       https://github.com/01org/prd
 
 
-LIBNVDIMM PMEM and BLK
-======================
+LIBNVDIMM PMEM
+==============
 
 Prior to the arrival of the NFIT, non-volatile memory was described to a
 system in various ad-hoc ways.  Usually only the bare minimum was
 provided, namely, a single system-physical-address range where writes
 are expected to be durable after a system power loss.  Now, the NFIT
 specification standardizes not only the description of PMEM, but also
-BLK and platform message-passing entry points for control and
-configuration.
-
-For each NVDIMM access method (PMEM, BLK), LIBNVDIMM provides a block
-device driver:
-
-    1. PMEM (nd_pmem.ko): Drives a system-physical-address range.  This
-       range is contiguous in system memory and may be interleaved (hardware
-       memory controller striped) across multiple DIMMs.  When interleaved the
-       platform may optionally provide details of which DIMMs are participating
-       in the interleave.
-
-       Note that while LIBNVDIMM describes system-physical-address ranges that may
-       alias with BLK access as ND_NAMESPACE_PMEM ranges and those without
-       alias as ND_NAMESPACE_IO ranges, to the nd_pmem driver there is no
-       distinction.  The different device-types are an implementation detail
-       that userspace can exploit to implement policies like "only interface
-       with address ranges from certain DIMMs".  It is worth noting that when
-       aliasing is present and a DIMM lacks a label, then no block device can
-       be created by default as userspace needs to do at least one allocation
-       of DPA to the PMEM range.  In contrast ND_NAMESPACE_IO ranges, once
-       registered, can be immediately attached to nd_pmem.
-
-    2. BLK (nd_blk.ko): This driver performs I/O using a set of platform
-       defined apertures.  A set of apertures will access just one DIMM.
-       Multiple windows (apertures) allow multiple concurrent accesses, much like
-       tagged-command-queuing, and would likely be used by different threads or
-       different CPUs.
-
-       The NFIT specification defines a standard format for a BLK-aperture, but
-       the spec also allows for vendor specific layouts, and non-NFIT BLK
-       implementations may have other designs for BLK I/O.  For this reason
-       "nd_blk" calls back into platform-specific code to perform the I/O.
-
-       One such implementation is defined in the "Driver Writer's Guide" and "DSM
-       Interface Example".
-
-
-Why BLK?
-========
+platform message-passing entry points for control and configuration.
+
+PMEM (nd_pmem.ko): Drives a system-physical-address range.  This range is
+contiguous in system memory and may be interleaved (hardware memory controller
+striped) across multiple DIMMs.  When interleaved the platform may optionally
+provide details of which DIMMs are participating in the interleave.
+
+It is worth noting that when the labeling capability is detected (an EFI
+namespace label index block is found), then no block device is created
+by default as userspace needs to do at least one allocation of DPA to
+the PMEM range.  In contrast ND_NAMESPACE_IO ranges, once registered,
+can be immediately attached to nd_pmem. This latter mode is called
+label-less or "legacy".
+
+PMEM-REGIONs, Atomic Sectors, and DAX
+-------------------------------------
 
-While PMEM provides direct byte-addressable CPU-load/store access to
-NVDIMM storage, it does not provide the best system RAS (recovery,
-availability, and serviceability) model.  An access to a corrupted
-system-physical-address address causes a CPU exception while an access
-to a corrupted address through an BLK-aperture causes that block window
-to raise an error status in a register.  The latter is more aligned with
-the standard error model that host-bus-adapter attached disks present.
-
-Also, if an administrator ever wants to replace a memory it is easier to
-service a system at DIMM module boundaries.  Compare this to PMEM where
-data could be interleaved in an opaque hardware specific manner across
-several DIMMs.
-
-PMEM vs BLK
------------
-
-BLK-apertures solve these RAS problems, but their presence is also the
-major contributing factor to the complexity of the ND subsystem.  They
-complicate the implementation because PMEM and BLK alias in DPA space.
-Any given DIMM's DPA-range may contribute to one or more
-system-physical-address sets of interleaved DIMMs, *and* may also be
-accessed in its entirety through its BLK-aperture.  Accessing a DPA
-through a system-physical-address while simultaneously accessing the
-same DPA through a BLK-aperture has undefined results.  For this reason,
-DIMMs with this dual interface configuration include a DSM function to
-store/retrieve a LABEL.  The LABEL effectively partitions the DPA-space
-into exclusive system-physical-address and BLK-aperture accessible
-regions.  For simplicity a DIMM is allowed a PMEM "region" per each
-interleave set in which it is a member.  The remaining DPA space can be
-carved into an arbitrary number of BLK devices with discontiguous
-extents.
-
-BLK-REGIONs, PMEM-REGIONs, Atomic Sectors, and DAX
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-One of the few
-reasons to allow multiple BLK namespaces per REGION is so that each
-BLK-namespace can be configured with a BTT with unique atomic sector
-sizes.  While a PMEM device can host a BTT the LABEL specification does
-not provide for a sector size to be specified for a PMEM namespace.
-
-This is due to the expectation that the primary usage model for PMEM is
-via DAX, and the BTT is incompatible with DAX.  However, for the cases
-where an application or filesystem still needs atomic sector update
-guarantees it can register a BTT on a PMEM device or partition.  See
+For the cases where an application or filesystem still needs atomic sector
+update guarantees it can register a BTT on a PMEM device or partition.  See
 LIBNVDIMM/NDCTL: Block Translation Table "btt"
 
 
@@ -236,51 +158,40 @@ For the remainder of this document the following diagram will be
 referenced for any example sysfs layouts::
 
 
-                               (a)               (b)           DIMM   BLK-REGION
+                               (a)               (b)           DIMM
             +-------------------+--------+--------+--------+
-  +------+  |       pm0.0       | blk2.0 | pm1.0  | blk2.1 |    0      region2
+  +------+  |       pm0.0       |  free  | pm1.0  |  free  |    0
   | imc0 +--+- - - region0- - - +--------+        +--------+
-  +--+---+  |       pm0.0       | blk3.0 | pm1.0  | blk3.1 |    1      region3
+  +--+---+  |       pm0.0       |  free  | pm1.0  |  free  |    1
      |      +-------------------+--------v        v--------+
   +--+---+                               |                 |
   | cpu0 |                                     region1
   +--+---+                               |                 |
      |      +----------------------------^        ^--------+
-  +--+---+  |           blk4.0           | pm1.0  | blk4.0 |    2      region4
+  +--+---+  |           free             | pm1.0  |  free  |    2
   | imc1 +--+----------------------------|        +--------+
-  +------+  |           blk5.0           | pm1.0  | blk5.0 |    3      region5
+  +------+  |           free             | pm1.0  |  free  |    3
             +----------------------------+--------+--------+
 
 In this platform we have four DIMMs and two memory controllers in one
-socket.  Each unique interface (BLK or PMEM) to DPA space is identified
-by a region device with a dynamically assigned id (REGION0 - REGION5).
+socket.  Each PMEM interleave set is identified by a region device with
+a dynamically assigned id.
 
     1. The first portion of DIMM0 and DIMM1 are interleaved as REGION0. A
        single PMEM namespace is created in the REGION0-SPA-range that spans most
        of DIMM0 and DIMM1 with a user-specified name of "pm0.0". Some of that
-       interleaved system-physical-address range is reclaimed as BLK-aperture
-       accessed space starting at DPA-offset (a) into each DIMM.  In that
-       reclaimed space we create two BLK-aperture "namespaces" from REGION2 and
-       REGION3 where "blk2.0" and "blk3.0" are just human readable names that
-       could be set to any user-desired name in the LABEL.
+       interleaved system-physical-address range is left free for
+       another PMEM namespace to be defined.
 
     2. In the last portion of DIMM0 and DIMM1 we have an interleaved
        system-physical-address range, REGION1, that spans those two DIMMs as
        well as DIMM2 and DIMM3.  Some of REGION1 is allocated to a PMEM namespace
-       named "pm1.0", the rest is reclaimed in 4 BLK-aperture namespaces (for
-       each DIMM in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
-       "blk5.0".
-
-    3. The portion of DIMM2 and DIMM3 that do not participate in the REGION1
-       interleaved system-physical-address range (i.e. the DPA address past
-       offset (b) are also included in the "blk4.0" and "blk5.0" namespaces.
-       Note, that this example shows that BLK-aperture namespaces don't need to
-       be contiguous in DPA-space.
+       named "pm1.0".
 
     This bus is provided by the kernel under the device
     /sys/devices/platform/nfit_test.0 when the nfit_test.ko module from
-    tools/testing/nvdimm is loaded.  This not only test LIBNVDIMM but the
-    acpi_nfit.ko driver as well.
+    tools/testing/nvdimm is loaded. This module is a unit test for
+    LIBNVDIMM and the acpi_nfit.ko driver.
 
 
 LIBNVDIMM Kernel Device Model and LIBNDCTL Userspace API
@@ -469,17 +380,14 @@ identified by an "nfit_handle" a 32-bit value where:
 LIBNVDIMM/LIBNDCTL: Region
 --------------------------
 
-A generic REGION device is registered for each PMEM range or BLK-aperture
-set.  Per the example there are 6 regions: 2 PMEM and 4 BLK-aperture
-sets on the "nfit_test.0" bus.  The primary role of regions are to be a
-container of "mappings".  A mapping is a tuple of <DIMM,
-DPA-start-offset, length>.
+A generic REGION device is registered for each PMEM interleave-set /
+range. Per the example there are 2 PMEM regions on the "nfit_test.0"
+bus. The primary role of regions are to be a container of "mappings".  A
+mapping is a tuple of <DIMM, DPA-start-offset, length>.
 
-LIBNVDIMM provides a built-in driver for these REGION devices.  This driver
-is responsible for reconciling the aliased DPA mappings across all
-regions, parsing the LABEL, if present, and then emitting NAMESPACE
-devices with the resolved/exclusive DPA-boundaries for the nd_pmem or
-nd_blk device driver to consume.
+LIBNVDIMM provides a built-in driver for REGION devices.  This driver
+is responsible for parsing all LABELs, if present, and then emitting NAMESPACE
+devices for the nd_pmem driver to consume.
 
 In addition to the generic attributes of "mapping"s, "interleave_ways"
 and "size" the REGION device also exports some convenience attributes.
@@ -493,8 +401,6 @@ LIBNVDIMM: region::
 
        struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                        struct nd_region_desc *ndr_desc);
-       struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
-                       struct nd_region_desc *ndr_desc);
 
 ::
 
@@ -527,8 +433,9 @@ LIBNDCTL: region enumeration example
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Sample region retrieval routines based on NFIT-unique data like
-"spa_index" (interleave set id) for PMEM and "nfit_handle" (dimm id) for
-BLK::
+"spa_index" (interleave set id).
+
+::
 
        static struct ndctl_region *get_pmem_region_by_spa_index(struct ndctl_bus *bus,
                        unsigned int spa_index)
@@ -544,139 +451,23 @@ BLK::
                return NULL;
        }
 
-       static struct ndctl_region *get_blk_region_by_dimm_handle(struct ndctl_bus *bus,
-                       unsigned int handle)
-       {
-               struct ndctl_region *region;
-
-               ndctl_region_foreach(bus, region) {
-                       struct ndctl_mapping *map;
-
-                       if (ndctl_region_get_type(region) != ND_DEVICE_REGION_BLOCK)
-                               continue;
-                       ndctl_mapping_foreach(region, map) {
-                               struct ndctl_dimm *dimm = ndctl_mapping_get_dimm(map);
-
-                               if (ndctl_dimm_get_handle(dimm) == handle)
-                                       return region;
-                       }
-               }
-               return NULL;
-       }
-
-
-Why Not Encode the Region Type into the Region Name?
-----------------------------------------------------
-
-At first glance it seems since NFIT defines just PMEM and BLK interface
-types that we should simply name REGION devices with something derived
-from those type names.  However, the ND subsystem explicitly keeps the
-REGION name generic and expects userspace to always consider the
-region-attributes for four reasons:
-
-    1. There are already more than two REGION and "namespace" types.  For
-       PMEM there are two subtypes.  As mentioned previously we have PMEM where
-       the constituent DIMM devices are known and anonymous PMEM.  For BLK
-       regions the NFIT specification already anticipates vendor specific
-       implementations.  The exact distinction of what a region contains is in
-       the region-attributes not the region-name or the region-devtype.
-
-    2. A region with zero child-namespaces is a possible configuration.  For
-       example, the NFIT allows for a DCR to be published without a
-       corresponding BLK-aperture.  This equates to a DIMM that can only accept
-       control/configuration messages, but no i/o through a descendant block
-       device.  Again, this "type" is advertised in the attributes ('mappings'
-       == 0) and the name does not tell you much.
-
-    3. What if a third major interface type arises in the future?  Outside
-       of vendor specific implementations, it's not difficult to envision a
-       third class of interface type beyond BLK and PMEM.  With a generic name
-       for the REGION level of the device-hierarchy old userspace
-       implementations can still make sense of new kernel advertised
-       region-types.  Userspace can always rely on the generic region
-       attributes like "mappings", "size", etc and the expected child devices
-       named "namespace".  This generic format of the device-model hierarchy
-       allows the LIBNVDIMM and LIBNDCTL implementations to be more uniform and
-       future-proof.
-
-    4. There are more robust mechanisms for determining the major type of a
-       region than a device name.  See the next section, How Do I Determine the
-       Major Type of a Region?
-
-How Do I Determine the Major Type of a Region?
-----------------------------------------------
-
-Outside of the blanket recommendation of "use libndctl", or simply
-looking at the kernel header (/usr/include/linux/ndctl.h) to decode the
-"nstype" integer attribute, here are some other options.
-
-1. module alias lookup
-^^^^^^^^^^^^^^^^^^^^^^
-
-    The whole point of region/namespace device type differentiation is to
-    decide which block-device driver will attach to a given LIBNVDIMM namespace.
-    One can simply use the modalias to lookup the resulting module.  It's
-    important to note that this method is robust in the presence of a
-    vendor-specific driver down the road.  If a vendor-specific
-    implementation wants to supplant the standard nd_blk driver it can with
-    minimal impact to the rest of LIBNVDIMM.
-
-    In fact, a vendor may also want to have a vendor-specific region-driver
-    (outside of nd_region).  For example, if a vendor defined its own LABEL
-    format it would need its own region driver to parse that LABEL and emit
-    the resulting namespaces.  The output from module resolution is more
-    accurate than a region-name or region-devtype.
-
-2. udev
-^^^^^^^
-
-    The kernel "devtype" is registered in the udev database::
-
-       # udevadm info --path=/devices/platform/nfit_test.0/ndbus0/region0
-       P: /devices/platform/nfit_test.0/ndbus0/region0
-       E: DEVPATH=/devices/platform/nfit_test.0/ndbus0/region0
-       E: DEVTYPE=nd_pmem
-       E: MODALIAS=nd:t2
-       E: SUBSYSTEM=nd
-
-       # udevadm info --path=/devices/platform/nfit_test.0/ndbus0/region4
-       P: /devices/platform/nfit_test.0/ndbus0/region4
-       E: DEVPATH=/devices/platform/nfit_test.0/ndbus0/region4
-       E: DEVTYPE=nd_blk
-       E: MODALIAS=nd:t3
-       E: SUBSYSTEM=nd
-
-    ...and is available as a region attribute, but keep in mind that the
-    "devtype" does not indicate sub-type variations and scripts should
-    really be understanding the other attributes.
-
-3. type specific attributes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-    As it currently stands a BLK-aperture region will never have a
-    "nfit/spa_index" attribute, but neither will a non-NFIT PMEM region.  A
-    BLK region with a "mappings" value of 0 is, as mentioned above, a DIMM
-    that does not allow I/O.  A PMEM region with a "mappings" value of zero
-    is a simple system-physical-address range.
-
 
 LIBNVDIMM/LIBNDCTL: Namespace
 -----------------------------
 
-A REGION, after resolving DPA aliasing and LABEL specified boundaries,
-surfaces one or more "namespace" devices.  The arrival of a "namespace"
-device currently triggers either the nd_blk or nd_pmem driver to load
-and register a disk/block device.
+A REGION, after resolving DPA aliasing and LABEL specified boundaries, surfaces
+one or more "namespace" devices.  The arrival of a "namespace" device currently
+triggers the nd_pmem driver to load and register a disk/block device.
 
 LIBNVDIMM: namespace
 ^^^^^^^^^^^^^^^^^^^^
 
-Here is a sample layout from the three major types of NAMESPACE where
-namespace0.0 represents DIMM-info-backed PMEM (note that it has a 'uuid'
-attribute), namespace2.0 represents a BLK namespace (note it has a
-'sector_size' attribute) that, and namespace6.0 represents an anonymous
-PMEM namespace (note that has no 'uuid' attribute due to not support a
-LABEL)::
+Here is a sample layout from the 2 major types of NAMESPACE where namespace0.0
+represents DIMM-info-backed PMEM (note that it has a 'uuid' attribute), and
+namespace1.0 represents an anonymous PMEM namespace (note that has no 'uuid'
+attribute due to not supporting a LABEL)
+
+::
 
        /sys/devices/platform/nfit_test.0/ndbus0/region0/namespace0.0
        |-- alt_name
@@ -691,20 +482,7 @@ LABEL)::
        |-- type
        |-- uevent
        `-- uuid
-       /sys/devices/platform/nfit_test.0/ndbus0/region2/namespace2.0
-       |-- alt_name
-       |-- devtype
-       |-- dpa_extents
-       |-- force_raw
-       |-- modalias
-       |-- numa_node
-       |-- sector_size
-       |-- size
-       |-- subsystem -> ../../../../../../bus/nd
-       |-- type
-       |-- uevent
-       `-- uuid
-       /sys/devices/platform/nfit_test.1/ndbus1/region6/namespace6.0
+       /sys/devices/platform/nfit_test.1/ndbus1/region1/namespace1.0
        |-- block
        |   `-- pmem0
        |-- devtype
@@ -786,9 +564,9 @@ Why the Term "namespace"?
 LIBNVDIMM/LIBNDCTL: Block Translation Table "btt"
 -------------------------------------------------
 
-A BTT (design document: https://pmem.io/2014/09/23/btt.html) is a stacked
-block device driver that fronts either the whole block device or a
-partition of a block device emitted by either a PMEM or BLK NAMESPACE.
+A BTT (design document: https://pmem.io/2014/09/23/btt.html) is a
+personality driver for a namespace that fronts the entire namespace as an
+'address abstraction'.
 
 LIBNVDIMM: btt layout
 ^^^^^^^^^^^^^^^^^^^^^
@@ -815,7 +593,9 @@ LIBNDCTL: btt creation example
 Similar to namespaces an idle BTT device is automatically created per
 region.  Each time this "seed" btt device is configured and enabled a new
 seed is created.  Creating a BTT configuration involves two steps of
-finding and idle BTT and assigning it to consume a PMEM or BLK namespace::
+finding an idle BTT and assigning it to consume a namespace.
+
+::
 
        static struct ndctl_btt *get_idle_btt(struct ndctl_region *region)
        {
@@ -863,25 +643,15 @@ For the given example above, here is the view of the objects as seen by the
 LIBNDCTL API::
 
               +---+
-              |CTX|    +---------+   +--------------+  +---------------+
-              +-+-+  +-> REGION0 +---> NAMESPACE0.0 +--> PMEM8 "pm0.0" |
-                |    | +---------+   +--------------+  +---------------+
-  +-------+     |    | +---------+   +--------------+  +---------------+
-  | DIMM0 <-+   |    +-> REGION1 +---> NAMESPACE1.0 +--> PMEM6 "pm1.0" |
-  +-------+ |   |    | +---------+   +--------------+  +---------------+
+              |CTX|
+              +-+-+
+                |
+  +-------+     |
+  | DIMM0 <-+   |      +---------+   +--------------+  +---------------+
+  +-------+ |   |    +-> REGION0 +---> NAMESPACE0.0 +--> PMEM8 "pm0.0" |
   | DIMM1 <-+ +-v--+ | +---------+   +--------------+  +---------------+
-  +-------+ +-+BUS0+---> REGION2 +-+-> NAMESPACE2.0 +--> ND6  "blk2.0" |
-  | DIMM2 <-+ +----+ | +---------+ | +--------------+  +----------------------+
-  +-------+ |        |             +-> NAMESPACE2.1 +--> ND5  "blk2.1" | BTT2 |
-  | DIMM3 <-+        |               +--------------+  +----------------------+
-  +-------+          | +---------+   +--------------+  +---------------+
-                     +-> REGION3 +-+-> NAMESPACE3.0 +--> ND4  "blk3.0" |
-                     | +---------+ | +--------------+  +----------------------+
-                     |             +-> NAMESPACE3.1 +--> ND3  "blk3.1" | BTT1 |
-                     |               +--------------+  +----------------------+
-                     | +---------+   +--------------+  +---------------+
-                     +-> REGION4 +---> NAMESPACE4.0 +--> ND2  "blk4.0" |
-                     | +---------+   +--------------+  +---------------+
-                     | +---------+   +--------------+  +----------------------+
-                     +-> REGION5 +---> NAMESPACE5.0 +--> ND1  "blk5.0" | BTT0 |
-                       +---------+   +--------------+  +---------------+------+
+  +-------+ +-+BUS0+-| +---------+   +--------------+  +----------------------+
+  | DIMM2 <-+ +----+ +-> REGION1 +---> NAMESPACE1.0 +--> PMEM6 "pm1.0" | BTT1 |
+  +-------+ |        | +---------+   +--------------+  +---------------+------+
+  | DIMM3 <-+
+  +-------+
index 4f373a8..69f0017 100644 (file)
@@ -7,6 +7,8 @@ Network Filesystem Helper Library
 .. Contents:
 
  - Overview.
+ - Per-inode context.
+   - Inode context helper functions.
  - Buffered read helpers.
    - Read helper functions.
    - Read helper structures.
@@ -28,6 +30,69 @@ Note that the library module doesn't link against local caching directly, so
 access must be provided by the netfs.
 
 
+Per-Inode Context
+=================
+
+The network filesystem helper library needs a place to store a bit of state for
+its use on each netfs inode it is helping to manage.  To this end, a context
+structure is defined::
+
+       struct netfs_i_context {
+               const struct netfs_request_ops *ops;
+               struct fscache_cookie   *cache;
+       };
+
+A network filesystem that wants to use netfs lib must place one of these
+directly after the VFS ``struct inode`` it allocates, usually as part of its
+own struct.  This can be done in a way similar to the following::
+
+       struct my_inode {
+               struct {
+                       /* These must be contiguous */
+                       struct inode            vfs_inode;
+                       struct netfs_i_context  netfs_ctx;
+               };
+               ...
+       };
+
+This allows netfslib to find its state by simple offset from the inode pointer,
+thereby allowing the netfslib helper functions to be pointed to directly by the
+VFS/VM operation tables.
+
+The structure contains the following fields:
+
+ * ``ops``
+
+   The set of operations provided by the network filesystem to netfslib.
+
+ * ``cache``
+
+   Local caching cookie, or NULL if no caching is enabled.  This field does not
+   exist if fscache is disabled.
+
+
+Inode Context Helper Functions
+------------------------------
+
+To help deal with the per-inode context, a number of helper functions are
+provided.  Firstly, a function to perform basic initialisation on a context and
+set the operations table pointer::
+
+       void netfs_i_context_init(struct inode *inode,
+                                 const struct netfs_request_ops *ops);
+
+then two functions to cast between the VFS inode structure and the netfs
+context::
+
+       struct netfs_i_context *netfs_i_context(struct inode *inode);
+       struct inode *netfs_inode(struct netfs_i_context *ctx);
+
+and finally, a function to get the cache cookie pointer from the context
+attached to an inode (or NULL if fscache is disabled)::
+
+       struct fscache_cookie *netfs_i_cookie(struct inode *inode);
+
+
 Buffered Read Helpers
 =====================
 
@@ -70,38 +135,22 @@ Read Helper Functions
 
 Three read helpers are provided::
 
-       void netfs_readahead(struct readahead_control *ractl,
-                            const struct netfs_read_request_ops *ops,
-                            void *netfs_priv);
+       void netfs_readahead(struct readahead_control *ractl);
        int netfs_readpage(struct file *file,
-                          struct folio *folio,
-                          const struct netfs_read_request_ops *ops,
-                          void *netfs_priv);
+                          struct page *page);
        int netfs_write_begin(struct file *file,
                              struct address_space *mapping,
                              loff_t pos,
                              unsigned int len,
                              unsigned int flags,
                              struct folio **_folio,
-                             void **_fsdata,
-                             const struct netfs_read_request_ops *ops,
-                             void *netfs_priv);
-
-Each corresponds to a VM operation, with the addition of a couple of parameters
-for the use of the read helpers:
+                             void **_fsdata);
 
- * ``ops``
-
-   A table of operations through which the helpers can talk to the filesystem.
-
- * ``netfs_priv``
+Each corresponds to a VM address space operation.  These operations use the
+state in the per-inode context.
 
-   Filesystem private data (can be NULL).
-
-Both of these values will be stored into the read request structure.
-
-For ->readahead() and ->readpage(), the network filesystem should just jump
-into the corresponding read helper; whereas for ->write_begin(), it may be a
+For ->readahead() and ->readpage(), the network filesystem just points directly
+at the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
 conflicting writes or track dirty data and needs to put the acquired folio if
 an error occurs after calling the helper.
@@ -116,7 +165,7 @@ occurs, the request will get partially completed if sufficient data is read.
 
 Additionally, there is::
 
-  * void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
+  * void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
                                 ssize_t transferred_or_error,
                                 bool was_async);
 
@@ -132,7 +181,7 @@ Read Helper Structures
 The read helpers make use of a couple of structures to maintain the state of
 the read.  The first is a structure that manages a read request as a whole::
 
-       struct netfs_read_request {
+       struct netfs_io_request {
                struct inode            *inode;
                struct address_space    *mapping;
                struct netfs_cache_resources cache_resources;
@@ -140,7 +189,7 @@ the read.  The first is a structure that manages a read request as a whole::
                loff_t                  start;
                size_t                  len;
                loff_t                  i_size;
-               const struct netfs_read_request_ops *netfs_ops;
+               const struct netfs_request_ops *netfs_ops;
                unsigned int            debug_id;
                ...
        };
@@ -187,8 +236,8 @@ The above fields are the ones the netfs can use.  They are:
 The second structure is used to manage individual slices of the overall read
 request::
 
-       struct netfs_read_subrequest {
-               struct netfs_read_request *rreq;
+       struct netfs_io_subrequest {
+               struct netfs_io_request *rreq;
                loff_t                  start;
                size_t                  len;
                size_t                  transferred;
@@ -244,32 +293,26 @@ Read Helper Operations
 The network filesystem must provide the read helpers with a table of operations
 through which it can issue requests and negotiate::
 
-       struct netfs_read_request_ops {
-               void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
-               bool (*is_cache_enabled)(struct inode *inode);
-               int (*begin_cache_operation)(struct netfs_read_request *rreq);
-               void (*expand_readahead)(struct netfs_read_request *rreq);
-               bool (*clamp_length)(struct netfs_read_subrequest *subreq);
-               void (*issue_op)(struct netfs_read_subrequest *subreq);
-               bool (*is_still_valid)(struct netfs_read_request *rreq);
+       struct netfs_request_ops {
+               void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+               int (*begin_cache_operation)(struct netfs_io_request *rreq);
+               void (*expand_readahead)(struct netfs_io_request *rreq);
+               bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+               void (*issue_read)(struct netfs_io_subrequest *subreq);
+               bool (*is_still_valid)(struct netfs_io_request *rreq);
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                         struct folio *folio, void **_fsdata);
-               void (*done)(struct netfs_read_request *rreq);
+               void (*done)(struct netfs_io_request *rreq);
                void (*cleanup)(struct address_space *mapping, void *netfs_priv);
        };
 
 The operations are as follows:
 
- * ``init_rreq()``
+ * ``init_request()``
 
    [Optional] This is called to initialise the request structure.  It is given
    the file for reference and can modify the ->netfs_priv value.
 
- * ``is_cache_enabled()``
-
-   [Required] This is called by netfs_write_begin() to ask if the file is being
-   cached.  It should return true if it is being cached and false otherwise.
-
  * ``begin_cache_operation()``
 
    [Optional] This is called to ask the network filesystem to call into the
@@ -305,7 +348,7 @@ The operations are as follows:
 
    This should return 0 on success and an error code on error.
 
- * ``issue_op()``
+ * ``issue_read()``
 
    [Required] The helpers use this to dispatch a subrequest to the server for
    reading.  In the subrequest, ->start, ->len and ->transferred indicate what
@@ -420,12 +463,12 @@ The network filesystem's ->begin_cache_operation() method is called to set up a
 cache and this must call into the cache to do the work.  If using fscache, for
 example, the cache would call::
 
-       int fscache_begin_read_operation(struct netfs_read_request *rreq,
+       int fscache_begin_read_operation(struct netfs_io_request *rreq,
                                         struct fscache_cookie *cookie);
 
 passing in the request pointer and the cookie corresponding to the file.
 
-The netfs_read_request object contains a place for the cache to hang its
+The netfs_io_request object contains a place for the cache to hang its
 state::
 
        struct netfs_cache_resources {
@@ -443,7 +486,7 @@ operation table looks like the following::
                void (*expand_readahead)(struct netfs_cache_resources *cres,
                                         loff_t *_start, size_t *_len, loff_t i_size);
 
-               enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
+               enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
                                                       loff_t i_size);
 
                int (*read)(struct netfs_cache_resources *cres,
@@ -562,4 +605,5 @@ API Function Reference
 ======================
 
 .. kernel-doc:: include/linux/netfs.h
-.. kernel-doc:: fs/netfs/read_helper.c
+.. kernel-doc:: fs/netfs/buffered_read.c
+.. kernel-doc:: fs/netfs/io.c
index 2d1fc03..ef19b9c 100644 (file)
@@ -77,6 +77,17 @@ HOSTLDLIBS
 ----------
 Additional libraries to link against when building host programs.
 
+.. _userkbuildflags:
+
+USERCFLAGS
+----------
+Additional options used for $(CC) when compiling userprogs.
+
+USERLDFLAGS
+-----------
+Additional options used for $(LD) when linking userprogs. userprogs are linked
+with CC, so $(USERLDFLAGS) should include the "-Wl," prefix as applicable.
+
 KBUILD_KCONFIG
 --------------
 Set the top-level Kconfig file to the value of this environment
index d326168..b854bb4 100644 (file)
@@ -49,17 +49,36 @@ example: ::
 LLVM Utilities
 --------------
 
-LLVM has substitutes for GNU binutils utilities. Kbuild supports ``LLVM=1``
-to enable them. ::
-
-       make LLVM=1
-
-They can be enabled individually. The full list of the parameters: ::
+LLVM has substitutes for GNU binutils utilities. They can be enabled individually.
+The full list of supported make variables::
 
        make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
          OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump READELF=llvm-readelf \
          HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar HOSTLD=ld.lld
 
+To simplify the above command, Kbuild supports the ``LLVM`` variable::
+
+       make LLVM=1
+
+If your LLVM tools are not available in your PATH, you can supply their
+location using the LLVM variable with a trailing slash::
+
+       make LLVM=/path/to/llvm/
+
+which will use ``/path/to/llvm/clang``, ``/path/to/llvm/ld.lld``, etc.
+
+If your LLVM tools have a version suffix and you want to test with that
+explicit version rather than the unsuffixed executables like ``LLVM=1``, you
+can pass the suffix using the ``LLVM`` variable::
+
+       make LLVM=-14
+
+which will use ``clang-14``, ``ld.lld-14``, etc.
+
+``LLVM=0`` is not the same as omitting ``LLVM`` altogether; it will behave like
+``LLVM=1``. If you only wish to use certain LLVM utilities, use their respective
+make variables.
+
 The integrated assembler is enabled by default. You can pass ``LLVM_IAS=0`` to
 disable it.
 
index b008b90..11a296e 100644 (file)
@@ -982,6 +982,8 @@ The syntax is quite similar. The difference is to use "userprogs" instead of
 
        When linking bpfilter_umh, it will be passed the extra option -static.
 
+       From command line, :ref:`USERCFLAGS and USERLDFLAGS <userkbuildflags>` will also be used.
+
 5.4 When userspace programs are actually built
 ----------------------------------------------
 
index bfa75ea..9933faa 100644 (file)
@@ -211,9 +211,6 @@ raw_spinlock_t and spinlock_t
 raw_spinlock_t
 --------------
 
-raw_spinlock_t is a strict spinning lock implementation regardless of the
-kernel configuration including PREEMPT_RT enabled kernels.
-
 raw_spinlock_t is a strict spinning lock implementation in all kernels,
 including PREEMPT_RT kernels.  Use raw_spinlock_t only in real critical
 core code, low-level interrupt handling and places where disabling
index f0a6043..3e03283 100644 (file)
@@ -12,6 +12,7 @@ additions to this manual.
    configure-git
    rebasing-and-merging
    pull-requests
+   messy-diffstat
    maintainer-entry-profile
    modifying-patches
 
diff --git a/Documentation/maintainer/messy-diffstat.rst b/Documentation/maintainer/messy-diffstat.rst
new file mode 100644 (file)
index 0000000..c015f66
--- /dev/null
@@ -0,0 +1,96 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+Handling messy pull-request diffstats
+=====================================
+
+Subsystem maintainers routinely use ``git request-pull`` as part of the
+process of sending work upstream.  Normally, the result includes a nice
+diffstat that shows which files will be touched and how much of each will
+be changed.  Occasionally, though, a repository with a relatively
+complicated development history will yield a massive diffstat containing a
+great deal of unrelated work.  The result looks ugly and obscures what the
+pull request is actually doing.  This document describes what is happening
+and how to fix things up; it is derived from The Wisdom of Linus Torvalds,
+found in Linus1_ and Linus2_.
+
+.. _Linus1: https://lore.kernel.org/lkml/CAHk-=wg3wXH2JNxkQi+eLZkpuxqV+wPiHhw_Jf7ViH33Sw7PHA@mail.gmail.com/
+.. _Linus2: https://lore.kernel.org/lkml/CAHk-=wgXbSa8yq8Dht8at+gxb_idnJ7X5qWZQWRBN4_CUPr=eQ@mail.gmail.com/
+
+A Git development history proceeds as a series of commits.  In a simplified
+manner, mainline kernel development looks like this::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+
+If one wants to see what has changed between two points, a command like
+this will do the job::
+
+  $ git diff --stat --summary vN-rc2..vN-rc3
+
+Here, there are two clear points in the history; Git will essentially
+"subtract" the beginning point from the end point and display the resulting
+differences.  The requested operation is unambiguous and easy enough to
+understand.
+
+When a subsystem maintainer creates a branch and commits changes to it, the
+result in the simplest case is a history that looks like::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                          |
+                          +-- c1 --- c2 --- ... --- cN
+
+If that maintainer now uses ``git diff`` to see what has changed between
+the mainline branch (let's call it "linus") and cN, there are still two
+clear endpoints, and the result is as expected.  So a pull request
+generated with ``git request-pull`` will also be as expected.  But now
+consider a slightly more complex development history::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                |         |
+                |         +-- c1 --- c2 --- ... --- cN
+                |                   /
+                +-- x1 --- x2 --- x3
+
+Our maintainer has created one branch at vN-rc1 and another at vN-rc2; the
+two were then subsequently merged into c2.  Now a pull request generated
+for cN may end up being messy indeed, and developers often end up wondering
+why.
+
+What is happening here is that there are no longer two clear end points for
+the ``git diff`` operation to use.  The development culminating in cN
+started in two different places; to generate the diffstat, ``git diff``
+ends up having to pick one of them and hoping for the best.  If the diffstat
+starts at vN-rc1, it may end up including all of the changes between there
+and the second origin end point (vN-rc2), which is certainly not what our
+maintainer had in mind.  With all of that extra junk in the diffstat, it
+may be impossible to tell what actually happened in the changes leading up
+to cN.
+
+Maintainers often try to resolve this problem by, for example, rebasing the
+branch or performing another merge with the linus branch, then recreating
+the pull request.  This approach tends not to lead to joy at the receiving
+end of that pull request; rebasing and/or merging just before pushing
+upstream is a well-known way to get a grumpy response.
+
+So what is to be done?  The best response when confronted with this
+situation is indeed to do a merge with the branch you intend your work
+to be pulled into, but to do it privately, as if it were the source of
+shame.  Create a new, throwaway branch and do the merge there::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                |         |                                      |
+                |         +-- c1 --- c2 --- ... --- cN           |
+                |                   /               |            |
+                +-- x1 --- x2 --- x3                +------------+-- TEMP
+
+The merge operation resolves all of the complications resulting from the
+multiple beginning points, yielding a coherent result that contains only
+the differences from the mainline branch.  Now it will be possible to
+generate a diffstat with the desired information::
+
+  $ git diff -C --stat --summary linus..TEMP
+
+Save the output from this command, then simply delete the TEMP branch;
+definitely do not expose it to the outside world.  Take the saved diffstat
+output and edit it into the messy pull request, yielding a result that
+shows what is really going on.  That request can then be sent upstream.
index ce01713..72cf335 100644 (file)
@@ -1,12 +1,13 @@
 Linux Networking Documentation
 ==============================
 
+Refer to :ref:`netdev-FAQ` for a guide on netdev development process specifics.
+
 Contents:
 
 .. toctree::
    :maxdepth: 2
 
-   netdev-FAQ
    af_xdp
    bareudp
    batman-adv
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
deleted file mode 100644 (file)
index e26532f..0000000
+++ /dev/null
@@ -1,263 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-.. _netdev-FAQ:
-
-==========
-netdev FAQ
-==========
-
-What is netdev?
----------------
-It is a mailing list for all network-related Linux stuff.  This
-includes anything found under net/ (i.e. core code like IPv6) and
-drivers/net (i.e. hardware specific drivers) in the Linux source tree.
-
-Note that some subsystems (e.g. wireless drivers) which have a high
-volume of traffic have their own specific mailing lists.
-
-The netdev list is managed (like many other Linux mailing lists) through
-VGER (http://vger.kernel.org/) and archives can be found below:
-
--  http://marc.info/?l=linux-netdev
--  http://www.spinics.net/lists/netdev/
-
-Aside from subsystems like that mentioned above, all network-related
-Linux development (i.e. RFC, review, comments, etc.) takes place on
-netdev.
-
-How do the changes posted to netdev make their way into Linux?
---------------------------------------------------------------
-There are always two trees (git repositories) in play.  Both are
-driven by David Miller, the main network maintainer.  There is the
-``net`` tree, and the ``net-next`` tree.  As you can probably guess from
-the names, the ``net`` tree is for fixes to existing code already in the
-mainline tree from Linus, and ``net-next`` is where the new code goes
-for the future release.  You can find the trees here:
-
-- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
-
-How often do changes from these trees make it to the mainline Linus tree?
--------------------------------------------------------------------------
-To understand this, you need to know a bit of background information on
-the cadence of Linux development.  Each new release starts off with a
-two week "merge window" where the main maintainers feed their new stuff
-to Linus for merging into the mainline tree.  After the two weeks, the
-merge window is closed, and it is called/tagged ``-rc1``.  No new
-features get mainlined after this -- only fixes to the rc1 content are
-expected.  After roughly a week of collecting fixes to the rc1 content,
-rc2 is released.  This repeats on a roughly weekly basis until rc7
-(typically; sometimes rc6 if things are quiet, or rc8 if things are in a
-state of churn), and a week after the last vX.Y-rcN was done, the
-official vX.Y is released.
-
-Relating that to netdev: At the beginning of the 2-week merge window,
-the ``net-next`` tree will be closed - no new changes/features.  The
-accumulated new content of the past ~10 weeks will be passed onto
-mainline/Linus via a pull request for vX.Y -- at the same time, the
-``net`` tree will start accumulating fixes for this pulled content
-relating to vX.Y
-
-An announcement indicating when ``net-next`` has been closed is usually
-sent to netdev, but knowing the above, you can predict that in advance.
-
-IMPORTANT: Do not send new ``net-next`` content to netdev during the
-period during which ``net-next`` tree is closed.
-
-Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
-tree for ``net-next`` reopens to collect content for the next (vX.Y+1)
-release.
-
-If you aren't subscribed to netdev and/or are simply unsure if
-``net-next`` has re-opened yet, simply check the ``net-next`` git
-repository link above for any new networking-related commits.  You may
-also check the following website for the current status:
-
-  http://vger.kernel.org/~davem/net-next.html
-
-The ``net`` tree continues to collect fixes for the vX.Y content, and is
-fed back to Linus at regular (~weekly) intervals.  Meaning that the
-focus for ``net`` is on stabilization and bug fixes.
-
-Finally, the vX.Y gets released, and the whole cycle starts over.
-
-So where are we now in this cycle?
-----------------------------------
-
-Load the mainline (Linus) page here:
-
-  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
-and note the top of the "tags" section.  If it is rc1, it is early in
-the dev cycle.  If it was tagged rc7 a week ago, then a release is
-probably imminent.
-
-How do I indicate which tree (net vs. net-next) my patch should be in?
-----------------------------------------------------------------------
-Firstly, think whether you have a bug fix or new "next-like" content.
-Then once decided, assuming that you use git, use the prefix flag, i.e.
-::
-
-  git format-patch --subject-prefix='PATCH net-next' start..finish
-
-Use ``net`` instead of ``net-next`` (always lower case) in the above for
-bug-fix ``net`` content.  If you don't use git, then note the only magic
-in the above is just the subject text of the outgoing e-mail, and you
-can manually change it yourself with whatever MUA you are comfortable
-with.
-
-I sent a patch and I'm wondering what happened to it - how can I tell whether it got merged?
---------------------------------------------------------------------------------------------
-Start by looking at the main patchworks queue for netdev:
-
-  https://patchwork.kernel.org/project/netdevbpf/list/
-
-The "State" field will tell you exactly where things are at with your
-patch.
-
-The above only says "Under Review".  How can I find out more?
--------------------------------------------------------------
-Generally speaking, the patches get triaged quickly (in less than
-48h).  So be patient.  Asking the maintainer for status updates on your
-patch is a good way to ensure your patch is ignored or pushed to the
-bottom of the priority list.
-
-I submitted multiple versions of the patch series. Should I directly update patchwork for the previous versions of these patch series?
---------------------------------------------------------------------------------------------------------------------------------------
-No, please don't interfere with the patch status on patchwork, leave
-it to the maintainer to figure out what is the most recent and current
-version that should be applied. If there is any doubt, the maintainer
-will reply and ask what should be done.
-
-I made changes to only a few patches in a patch series should I resend only those changed?
-------------------------------------------------------------------------------------------
-No, please resend the entire patch series and make sure you do number your
-patches such that it is clear this is the latest and greatest set of patches
-that can be applied.
-
-I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
-----------------------------------------------------------------------------------------------------------------------------------------
-There is no revert possible, once it is pushed out, it stays like that.
-Please send incremental versions on top of what has been merged in order to fix
-the patches the way they would look like if your latest patch series was to be
-merged.
-
-Are there special rules regarding stable submissions on netdev?
----------------------------------------------------------------
-While it used to be the case that netdev submissions were not supposed
-to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
-the case today. Please follow the standard stable rules in
-:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
-and make sure you include appropriate Fixes tags!
-
-Is the comment style convention different for the networking content?
----------------------------------------------------------------------
-Yes, in a largely trivial way.  Instead of this::
-
-  /*
-   * foobar blah blah blah
-   * another line of text
-   */
-
-it is requested that you make it look like this::
-
-  /* foobar blah blah blah
-   * another line of text
-   */
-
-I am working in existing code that has the former comment style and not the latter. Should I submit new code in the former style or the latter?
------------------------------------------------------------------------------------------------------------------------------------------------
-Make it the latter style, so that eventually all code in the domain
-of netdev is of this format.
-
-I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
----------------------------------------------------------------------------------------------------------------------------
-No. The current netdev maintainer has consistently requested that
-people use the mailing lists and not reach out directly.  If you aren't
-OK with that, then perhaps consider mailing security@kernel.org or
-reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
-as possible alternative mechanisms.
-
-What level of testing is expected before I submit my change?
-------------------------------------------------------------
-If your changes are against ``net-next``, the expectation is that you
-have tested by layering your changes on top of ``net-next``.  Ideally
-you will have done run-time testing specific to your change, but at a
-minimum, your changes should survive an ``allyesconfig`` and an
-``allmodconfig`` build without new warnings or failures.
-
-How do I post corresponding changes to user space components?
--------------------------------------------------------------
-User space code exercising kernel features should be posted
-alongside kernel patches. This gives reviewers a chance to see
-how any new interface is used and how well it works.
-
-When user space tools reside in the kernel repo itself all changes
-should generally come as one series. If series becomes too large
-or the user space project is not reviewed on netdev include a link
-to a public repo where user space patches can be seen.
-
-In case user space tooling lives in a separate repository but is
-reviewed on netdev  (e.g. patches to `iproute2` tools) kernel and
-user space patches should form separate series (threads) when posted
-to the mailing list, e.g.::
-
-  [PATCH net-next 0/3] net: some feature cover letter
+   └─ [PATCH net-next 1/3] net: some feature prep
+   └─ [PATCH net-next 2/3] net: some feature do it
+   └─ [PATCH net-next 3/3] selftest: net: some feature
-
-  [PATCH iproute2-next] ip: add support for some feature
-
-Posting as one thread is discouraged because it confuses patchwork
-(as of patchwork 2.2.2).
-
-Can I reproduce the checks from patchwork on my local machine?
---------------------------------------------------------------
-
-Checks in patchwork are mostly simple wrappers around existing kernel
-scripts, the sources are available at:
-
-https://github.com/kuba-moo/nipa/tree/master/tests
-
-Running all the builds and checks locally is a pain, can I post my patches and have the patchwork bot validate them?
---------------------------------------------------------------------------------------------------------------------
-
-No, you must ensure that your patches are ready by testing them locally
-before posting to the mailing list. The patchwork build bot instance
-gets overloaded very easily and netdev@vger really doesn't need more
-traffic if we can help it.
-
-netdevsim is great, can I extend it for my out-of-tree tests?
--------------------------------------------------------------
-
-No, `netdevsim` is a test vehicle solely for upstream tests.
-(Please add your tests under tools/testing/selftests/.)
-
-We also give no guarantees that `netdevsim` won't change in the future
-in a way which would break what would normally be considered uAPI.
-
-Is netdevsim considered a "user" of an API?
--------------------------------------------
-
-Linux kernel has a long standing rule that no API should be added unless
-it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are
-strongly encouraged when adding new APIs, but `netdevsim` in itself
-is **not** considered a use case/user.
-
-Any other tips to help ensure my net/net-next patch gets OK'd?
---------------------------------------------------------------
-Attention to detail.  Re-read your own work as if you were the
-reviewer.  You can start with using ``checkpatch.pl``, perhaps even with
-the ``--strict`` flag.  But do not be mindlessly robotic in doing so.
-If your change is a bug fix, make sure your commit log indicates the
-end-user visible symptom, the underlying reason as to why it happens,
-and then if necessary, explain why the fix proposed is the best way to
-get things done.  Don't mangle whitespace, and as is common, don't
-mis-indent function arguments that span multiple lines.  If it is your
-first patch, mail it to yourself so you can test apply it to an
-unpatched tree to confirm infrastructure didn't mangle it.
-
-Finally, go back and read
-:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
-to be sure you are not repeating some common mistake documented there.
index 6af1abb..d783060 100644 (file)
@@ -16,3 +16,4 @@ Contents:
    :maxdepth: 2
 
    maintainer-tip
+   maintainer-netdev
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
new file mode 100644 (file)
index 0000000..c456b52
--- /dev/null
@@ -0,0 +1,285 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _netdev-FAQ:
+
+==========
+netdev FAQ
+==========
+
+What is netdev?
+---------------
+It is a mailing list for all network-related Linux stuff.  This
+includes anything found under net/ (i.e. core code like IPv6) and
+drivers/net (i.e. hardware specific drivers) in the Linux source tree.
+
+Note that some subsystems (e.g. wireless drivers) which have a high
+volume of traffic have their own specific mailing lists.
+
+The netdev list is managed (like many other Linux mailing lists) through
+VGER (http://vger.kernel.org/) with archives available at
+https://lore.kernel.org/netdev/
+
+Aside from subsystems like those mentioned above, all network-related
+Linux development (i.e. RFC, review, comments, etc.) takes place on
+netdev.
+
+How do the changes posted to netdev make their way into Linux?
+--------------------------------------------------------------
+There are always two trees (git repositories) in play.  Both are
+driven by David Miller, the main network maintainer.  There is the
+``net`` tree, and the ``net-next`` tree.  As you can probably guess from
+the names, the ``net`` tree is for fixes to existing code already in the
+mainline tree from Linus, and ``net-next`` is where the new code goes
+for the future release.  You can find the trees here:
+
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
+
+How do I indicate which tree (net vs. net-next) my patch should be in?
+----------------------------------------------------------------------
+To help maintainers and CI bots you should explicitly mark which tree
+your patch is targeting. Assuming that you use git, use the prefix
+flag::
+
+  git format-patch --subject-prefix='PATCH net-next' start..finish
+
+Use ``net`` instead of ``net-next`` (always lower case) in the above for
+bug-fix ``net`` content.
+
+How often do changes from these trees make it to the mainline Linus tree?
+-------------------------------------------------------------------------
+To understand this, you need to know a bit of background information on
+the cadence of Linux development.  Each new release starts off with a
+two week "merge window" where the main maintainers feed their new stuff
+to Linus for merging into the mainline tree.  After the two weeks, the
+merge window is closed, and it is called/tagged ``-rc1``.  No new
+features get mainlined after this -- only fixes to the rc1 content are
+expected.  After roughly a week of collecting fixes to the rc1 content,
+rc2 is released.  This repeats on a roughly weekly basis until rc7
+(typically; sometimes rc6 if things are quiet, or rc8 if things are in a
+state of churn), and a week after the last vX.Y-rcN was done, the
+official vX.Y is released.
+
+Relating that to netdev: At the beginning of the 2-week merge window,
+the ``net-next`` tree will be closed - no new changes/features.  The
+accumulated new content of the past ~10 weeks will be passed onto
+mainline/Linus via a pull request for vX.Y -- at the same time, the
+``net`` tree will start accumulating fixes for this pulled content
+relating to vX.Y
+
+An announcement indicating when ``net-next`` has been closed is usually
+sent to netdev, but knowing the above, you can predict that in advance.
+
+.. warning::
+  Do not send new ``net-next`` content to netdev during the
+  period during which ``net-next`` tree is closed.
+
+RFC patches sent for review only are obviously welcome at any time
+(use ``--subject-prefix='RFC net-next'`` with ``git format-patch``).
+
+Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
+tree for ``net-next`` reopens to collect content for the next (vX.Y+1)
+release.
+
+If you aren't subscribed to netdev and/or are simply unsure if
+``net-next`` has re-opened yet, simply check the ``net-next`` git
+repository link above for any new networking-related commits.  You may
+also check the following website for the current status:
+
+  http://vger.kernel.org/~davem/net-next.html
+
+The ``net`` tree continues to collect fixes for the vX.Y content, and is
+fed back to Linus at regular (~weekly) intervals.  Meaning that the
+focus for ``net`` is on stabilization and bug fixes.
+
+Finally, the vX.Y gets released, and the whole cycle starts over.
+
+So where are we now in this cycle?
+----------------------------------
+
+Load the mainline (Linus) page here:
+
+  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+and note the top of the "tags" section.  If it is rc1, it is early in
+the dev cycle.  If it was tagged rc7 a week ago, then a release is
+probably imminent. If the most recent tag is a final release tag
+(without an ``-rcN`` suffix) - we are most likely in a merge window
+and ``net-next`` is closed.
+
+How can I tell the status of a patch I've sent?
+-----------------------------------------------
+Start by looking at the main patchworks queue for netdev:
+
+  https://patchwork.kernel.org/project/netdevbpf/list/
+
+The "State" field will tell you exactly where things are at with your
+patch. Patches are indexed by the ``Message-ID`` header of the emails
+which carried them so if you have trouble finding your patch append
+the value of ``Message-ID`` to the URL above.
+
+How long before my patch is accepted?
+-------------------------------------
+Generally speaking, the patches get triaged quickly (in less than
+48h). But be patient, if your patch is active in patchwork (i.e. it's
+listed on the project's patch list) the chances it was missed are close to zero.
+Asking the maintainer for status updates on your
+patch is a good way to ensure your patch is ignored or pushed to the
+bottom of the priority list.
+
+Should I directly update patchwork state of my own patches?
+-----------------------------------------------------------
+It may be tempting to help the maintainers and update the state of your
+own patches when you post a new version or spot a bug. Please do not do that.
+Interfering with the patch status on patchwork will only cause confusion. Leave
+it to the maintainer to figure out what is the most recent and current
+version that should be applied. If there is any doubt, the maintainer
+will reply and ask what should be done.
+
+I made changes to only a few patches in a patch series should I resend only those changed?
+------------------------------------------------------------------------------------------
+No, please resend the entire patch series and make sure you do number your
+patches such that it is clear this is the latest and greatest set of patches
+that can be applied.
+
+I have received review feedback, when should I post a revised version of the patches?
+-------------------------------------------------------------------------------------
+Allow at least 24 hours to pass between postings. This will ensure reviewers
+from all geographical locations have a chance to chime in. Do not wait
+too long (weeks) between postings either as it will make it harder for reviewers
+to recall all the context.
+
+Make sure you address all the feedback in your new posting. Do not post a new
+version of the code if the discussion about the previous version is still
+ongoing, unless directly instructed by a reviewer.
+
+I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
+----------------------------------------------------------------------------------------------------------------------------------------
+There is no revert possible, once it is pushed out, it stays like that.
+Please send incremental versions on top of what has been merged in order to fix
+the patches the way they would look like if your latest patch series was to be
+merged.
+
+Are there special rules regarding stable submissions on netdev?
+---------------------------------------------------------------
+While it used to be the case that netdev submissions were not supposed
+to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
+the case today. Please follow the standard stable rules in
+:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
+and make sure you include appropriate Fixes tags!
+
+Is the comment style convention different for the networking content?
+---------------------------------------------------------------------
+Yes, in a largely trivial way.  Instead of this::
+
+  /*
+   * foobar blah blah blah
+   * another line of text
+   */
+
+it is requested that you make it look like this::
+
+  /* foobar blah blah blah
+   * another line of text
+   */
+
+I am working in existing code which uses non-standard formatting. Which formatting should I use?
+------------------------------------------------------------------------------------------------
+Make your code follow the most recent guidelines, so that eventually all code
+in the domain of netdev is in the preferred format.
+
+I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
+---------------------------------------------------------------------------------------------------------------------------
+No. The current netdev maintainer has consistently requested that
+people use the mailing lists and not reach out directly.  If you aren't
+OK with that, then perhaps consider mailing security@kernel.org or
+reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
+as possible alternative mechanisms.
+
+What level of testing is expected before I submit my change?
+------------------------------------------------------------
+At the very minimum your changes must survive an ``allyesconfig`` and an
+``allmodconfig`` build with ``W=1`` set without new warnings or failures.
+
+Ideally you will have done run-time testing specific to your change,
+and the patch series contains a set of kernel selftests for
+``tools/testing/selftests/net`` or tests using the KUnit framework.
+
+You are expected to test your changes on top of the relevant networking
+tree (``net`` or ``net-next``) and not e.g. a stable tree or ``linux-next``.
+
+How do I post corresponding changes to user space components?
+-------------------------------------------------------------
+User space code exercising kernel features should be posted
+alongside kernel patches. This gives reviewers a chance to see
+how any new interface is used and how well it works.
+
+When user space tools reside in the kernel repo itself all changes
+should generally come as one series. If the series becomes too large,
+or the user space project is not reviewed on netdev, include a link
+to a public repo where user space patches can be seen.
+
+In case user space tooling lives in a separate repository but is
+reviewed on netdev (e.g. patches to ``iproute2`` tools) kernel and
+user space patches should form separate series (threads) when posted
+to the mailing list, e.g.::
+
+  [PATCH net-next 0/3] net: some feature cover letter
+   └─ [PATCH net-next 1/3] net: some feature prep
+   └─ [PATCH net-next 2/3] net: some feature do it
+   └─ [PATCH net-next 3/3] selftest: net: some feature
+
+  [PATCH iproute2-next] ip: add support for some feature
+
+Posting as one thread is discouraged because it confuses patchwork
+(as of patchwork 2.2.2).
+
+Can I reproduce the checks from patchwork on my local machine?
+--------------------------------------------------------------
+
+Checks in patchwork are mostly simple wrappers around existing kernel
+scripts, the sources are available at:
+
+https://github.com/kuba-moo/nipa/tree/master/tests
+
+Running all the builds and checks locally is a pain, can I post my patches and have the patchwork bot validate them?
+--------------------------------------------------------------------------------------------------------------------
+
+No, you must ensure that your patches are ready by testing them locally
+before posting to the mailing list. The patchwork build bot instance
+gets overloaded very easily and netdev@vger really doesn't need more
+traffic if we can help it.
+
+netdevsim is great, can I extend it for my out-of-tree tests?
+-------------------------------------------------------------
+
+No, ``netdevsim`` is a test vehicle solely for upstream tests.
+(Please add your tests under ``tools/testing/selftests/``.)
+
+We also give no guarantees that ``netdevsim`` won't change in the future
+in a way which would break what would normally be considered uAPI.
+
+Is netdevsim considered a "user" of an API?
+-------------------------------------------
+
+Linux kernel has a long standing rule that no API should be added unless
+it has a real, in-tree user. Mock-ups and tests based on ``netdevsim`` are
+strongly encouraged when adding new APIs, but ``netdevsim`` in itself
+is **not** considered a use case/user.
+
+Any other tips to help ensure my net/net-next patch gets OK'd?
+--------------------------------------------------------------
+Attention to detail.  Re-read your own work as if you were the
+reviewer.  You can start with using ``checkpatch.pl``, perhaps even with
+the ``--strict`` flag.  But do not be mindlessly robotic in doing so.
+If your change is a bug fix, make sure your commit log indicates the
+end-user visible symptom, the underlying reason as to why it happens,
+and then if necessary, explain why the fix proposed is the best way to
+get things done.  Don't mangle whitespace, and as is common, don't
+mis-indent function arguments that span multiple lines.  If it is your
+first patch, mail it to yourself so you can test apply it to an
+unpatched tree to confirm infrastructure didn't mangle it.
+
+Finally, go back and read
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+to be sure you are not repeating some common mistake documented there.
index ea915c1..e23b876 100644 (file)
@@ -7,7 +7,6 @@ RISC-V architecture
 
     boot-image-header
     vm-layout
-    pmu
     patch-acceptance
 
     features
index 4392b3c..b5feb5b 100644 (file)
@@ -128,6 +128,7 @@ class KernelCmd(Directive):
         return out
 
     def nestedParse(self, lines, fname):
+        env = self.state.document.settings.env
         content = ViewList()
         node = nodes.section()
 
@@ -137,7 +138,7 @@ class KernelCmd(Directive):
                 code_block += "\n    " + l
             lines = code_block + "\n\n"
 
-        line_regex = re.compile("^#define LINENO (\S+)\#([0-9]+)$")
+        line_regex = re.compile("^\.\. LINENO (\S+)\#([0-9]+)$")
         ln = 0
         n = 0
         f = fname
@@ -154,6 +155,9 @@ class KernelCmd(Directive):
                     self.do_parse(content, node)
                     content = ViewList()
 
+                    # Add the file to Sphinx build dependencies
+                    env.note_dependency(os.path.abspath(f))
+
                 f = new_f
 
                 # sphinx counts lines from 0
index 8138d69..27b701e 100644 (file)
@@ -33,6 +33,7 @@ u"""
 
 import codecs
 import os
+import re
 import subprocess
 import sys
 
@@ -82,7 +83,7 @@ class KernelFeat(Directive):
 
         env = doc.settings.env
         cwd = path.dirname(doc.current_source)
-        cmd = "get_feat.pl rest --dir "
+        cmd = "get_feat.pl rest --enable-fname --dir "
         cmd += self.arguments[0]
 
         if len(self.arguments) > 1:
@@ -102,7 +103,22 @@ class KernelFeat(Directive):
         shell_env["srctree"] = srctree
 
         lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
-        nodeList = self.nestedParse(lines, fname)
+
+        line_regex = re.compile("^\.\. FILE (\S+)$")
+
+        out_lines = ""
+
+        for line in lines.split("\n"):
+            match = line_regex.search(line)
+            if match:
+                fname = match.group(1)
+
+                # Add the file to Sphinx build dependencies
+                env.note_dependency(os.path.abspath(fname))
+            else:
+                out_lines += line + "\n"
+
+        nodeList = self.nestedParse(out_lines, fname)
         return nodeList
 
     def runCmd(self, cmd, **kwargs):
index f523aa6..abe7680 100755 (executable)
@@ -59,6 +59,7 @@ class KernelInclude(Include):
     u"""KernelInclude (``kernel-include``) directive"""
 
     def run(self):
+        env = self.state.document.settings.env
         path = os.path.realpath(
             os.path.expandvars(self.arguments[0]))
 
@@ -70,6 +71,8 @@ class KernelInclude(Include):
 
         self.arguments[0] = path
 
+        env.note_dependency(os.path.abspath(path))
+
         #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
         return self._run()
 
index 8189c33..9395892 100644 (file)
@@ -130,7 +130,7 @@ class KernelDocDirective(Directive):
             result = ViewList()
 
             lineoffset = 0;
-            line_regex = re.compile("^#define LINENO ([0-9]+)$")
+            line_regex = re.compile("^\.\. LINENO ([0-9]+)$")
             for line in lines:
                 match = line_regex.search(line)
                 if match:
index 24d2b2a..cefdbb7 100644 (file)
@@ -212,7 +212,7 @@ def setupTools(app):
         if convert_cmd:
             kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
         else:
-            kernellog.warn(app,
+            kernellog.verbose(app,
                 "Neither inkscape(1) nor convert(1) found.\n"
                 "For SVG to PDF conversion, "
                 "install either Inkscape (https://inkscape.org/) (preferred) or\n"
@@ -296,8 +296,10 @@ def convert_image(img_node, translator, src_fname=None):
 
         if translator.builder.format == 'latex':
             if not inkscape_cmd and convert_cmd is None:
-                kernellog.verbose(app,
-                                  "no SVG to PDF conversion available / include SVG raw.")
+                kernellog.warn(app,
+                                  "no SVG to PDF conversion available / include SVG raw."
+                                  "\nIncluding large raw SVGs can cause xelatex error."
+                                  "\nInstall Inkscape (preferred) or ImageMagick.")
                 img_node.replace_self(file2literal(src_fname))
             else:
                 dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
index 9a35f50..2c57354 100644 (file)
@@ -1,2 +1,4 @@
+# jinja2>=3.1 is not compatible with Sphinx<4.0
+jinja2<3.1
 sphinx_rtd_theme
 Sphinx==2.4.4
index 9cccd3d..348ee7e 100644 (file)
@@ -49,13 +49,14 @@ might also consider using dev_archdata for this).
 
 ::
 
-  void rproc_shutdown(struct rproc *rproc)
+  int rproc_shutdown(struct rproc *rproc)
 
 Power off a remote processor (previously booted with rproc_boot()).
 In case @rproc is still being used by an additional user(s), then
 this function will just decrement the power refcount and exit,
 without really powering off the device.
 
+Returns 0 on success, and an appropriate error value otherwise.
 Every call to rproc_boot() must (eventually) be accompanied by a call
 to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 
index d5ad96c..863f67b 100644 (file)
@@ -1193,6 +1193,26 @@ E.g. ``os_close_file()`` is just a wrapper around ``close()``
 which ensures that the userspace function close does not clash
 with similarly named function(s) in the kernel part.
 
+Using UML as a Test Platform
+============================
+
+UML is an excellent test platform for device driver development. As
+with most things UML, "some user assembly may be required". It is
+up to the user to build their emulation environment. UML at present
+provides only the kernel infrastructure.
+
+Part of this infrastructure is the ability to load and parse fdt
+device tree blobs as used in Arm or Open Firmware platforms. These
+are supplied as an optional extra argument to the kernel command
+line::
+
+    dtb=filename
+
+The device tree is loaded and parsed at boottime and is accessible by
+drivers which query it. At this moment in time this facility is
+intended solely for development purposes. UML's own devices do not
+query the device tree.
+
 Security Considerations
 -----------------------
 
index c4de6f8..65204d7 100644 (file)
@@ -125,7 +125,6 @@ Usage
    additional function:
 
        Cull:
-               -c              Cull by comparing stacktrace instead of total block.
                --cull <rules>
                                Specify culling rules.Culling syntax is key[,key[,...]].Choose a
                                multi-letter key from the **STANDARD FORMAT SPECIFIERS** section.
index eae3af1..b280367 100644 (file)
@@ -52,8 +52,13 @@ The infrastructure may also be able to handle other conditions that make pages
 unevictable, either by definition or by circumstance, in the future.
 
 
-The Unevictable Page List
--------------------------
+The Unevictable LRU Page List
+-----------------------------
+
+The Unevictable LRU page list is a lie.  It was never an LRU-ordered list, but a
+companion to the LRU-ordered anonymous and file, active and inactive page lists;
+and now it is not even a page list.  But following familiar convention, here in
+this document and in the source, we often imagine it as a fifth LRU page list.
 
 The Unevictable LRU infrastructure consists of an additional, per-node, LRU list
 called the "unevictable" list and an associated page flag, PG_unevictable, to
@@ -63,8 +68,8 @@ The PG_unevictable flag is analogous to, and mutually exclusive with, the
 PG_active flag in that it indicates on which LRU list a page resides when
 PG_lru is set.
 
-The Unevictable LRU infrastructure maintains unevictable pages on an additional
-LRU list for a few reasons:
+The Unevictable LRU infrastructure maintains unevictable pages as if they were
+on an additional LRU list for a few reasons:
 
  (1) We get to "treat unevictable pages just like we treat other pages in the
      system - which means we get to use the same code to manipulate them, the
@@ -72,13 +77,11 @@ LRU list for a few reasons:
      of the statistics, etc..." [Rik van Riel]
 
  (2) We want to be able to migrate unevictable pages between nodes for memory
-     defragmentation, workload management and memory hotplug.  The linux kernel
+     defragmentation, workload management and memory hotplug.  The Linux kernel
      can only migrate pages that it can successfully isolate from the LRU
-     lists.  If we were to maintain pages elsewhere than on an LRU-like list,
-     where they can be found by isolate_lru_page(), we would prevent their
-     migration, unless we reworked migration code to find the unevictable pages
-     itself.
-
+     lists (or "Movable" pages: outside of consideration here).  If we were to
+     maintain pages elsewhere than on an LRU-like list, where they can be
+     detected by isolate_lru_page(), we would prevent their migration.
 
 The unevictable list does not differentiate between file-backed and anonymous,
 swap-backed pages.  This differentiation is only important while the pages are,
@@ -92,8 +95,8 @@ Memory Control Group Interaction
 --------------------------------
 
 The unevictable LRU facility interacts with the memory control group [aka
-memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by extending the
-lru_list enum.
+memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by
+extending the lru_list enum.
 
 The memory controller data structure automatically gets a per-node unevictable
 list as a result of the "arrayification" of the per-node LRU lists (one per
@@ -143,7 +146,6 @@ These are currently used in three places in the kernel:
      and this mark remains for the life of the inode.
 
  (2) By SYSV SHM to mark SHM_LOCK'd address spaces until SHM_UNLOCK is called.
-
      Note that SHM_LOCK is not required to page in the locked pages if they're
      swapped out; the application must touch the pages manually if it wants to
      ensure they're in memory.
@@ -156,19 +158,19 @@ These are currently used in three places in the kernel:
 Detecting Unevictable Pages
 ---------------------------
 
-The function page_evictable() in vmscan.c determines whether a page is
+The function page_evictable() in mm/internal.h determines whether a page is
 evictable or not using the query function outlined above [see section
 :ref:`Marking address spaces unevictable <mark_addr_space_unevict>`]
 to check the AS_UNEVICTABLE flag.
 
 For address spaces that are so marked after being populated (as SHM regions
-might be), the lock action (eg: SHM_LOCK) can be lazy, and need not populate
+might be), the lock action (e.g. SHM_LOCK) can be lazy, and need not populate
 the page tables for the region as does, for example, mlock(), nor need it make
 any special effort to push any pages in the SHM_LOCK'd area to the unevictable
 list.  Instead, vmscan will do this if and when it encounters the pages during
 a reclamation scan.
 
-On an unlock action (such as SHM_UNLOCK), the unlocker (eg: shmctl()) must scan
+On an unlock action (such as SHM_UNLOCK), the unlocker (e.g. shmctl()) must scan
 the pages in the region and "rescue" them from the unevictable list if no other
 condition is keeping them unevictable.  If an unevictable region is destroyed,
 the pages are also "rescued" from the unevictable list in the process of
@@ -176,7 +178,7 @@ freeing them.
 
 page_evictable() also checks for mlocked pages by testing an additional page
 flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is
-faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED.
+faulted into a VM_LOCKED VMA, or found in a VMA being VM_LOCKED.
 
 
 Vmscan's Handling of Unevictable Pages
@@ -186,28 +188,23 @@ If unevictable pages are culled in the fault path, or moved to the unevictable
 list at mlock() or mmap() time, vmscan will not encounter the pages until they
 have become evictable again (via munlock() for example) and have been "rescued"
 from the unevictable list.  However, there may be situations where we decide,
-for the sake of expediency, to leave a unevictable page on one of the regular
+for the sake of expediency, to leave an unevictable page on one of the regular
 active/inactive LRU lists for vmscan to deal with.  vmscan checks for such
 pages in all of the shrink_{active|inactive|page}_list() functions and will
 "cull" such pages that it encounters: that is, it diverts those pages to the
-unevictable list for the node being scanned.
+unevictable list for the memory cgroup and node being scanned.
 
 There may be situations where a page is mapped into a VM_LOCKED VMA, but the
 page is not marked as PG_mlocked.  Such pages will make it all the way to
-shrink_page_list() where they will be detected when vmscan walks the reverse
-map in try_to_unmap().  If try_to_unmap() returns SWAP_MLOCK,
-shrink_page_list() will cull the page at that point.
+shrink_active_list() or shrink_page_list() where they will be detected when
+vmscan walks the reverse map in page_referenced() or try_to_unmap().  The page
+is culled to the unevictable list when it is released by the shrinker.
 
 To "cull" an unevictable page, vmscan simply puts the page back on the LRU list
 using putback_lru_page() - the inverse operation to isolate_lru_page() - after
 dropping the page lock.  Because the condition which makes the page unevictable
-may change once the page is unlocked, putback_lru_page() will recheck the
-unevictable state of a page that it places on the unevictable list.  If the
-page has become unevictable, putback_lru_page() removes it from the list and
-retries, including the page_unevictable() test.  Because such a race is a rare
-event and movement of pages onto the unevictable list should be rare, these
-extra evictabilty checks should not occur in the majority of calls to
-putback_lru_page().
+may change once the page is unlocked, __pagevec_lru_add_fn() will recheck the
+unevictable state of a page before placing it on the unevictable list.
 
 
 MLOCKED Pages
@@ -227,16 +224,25 @@ Nick posted his patch as an alternative to a patch posted by Christoph Lameter
 to achieve the same objective: hiding mlocked pages from vmscan.
 
 In Nick's patch, he used one of the struct page LRU list link fields as a count
-of VM_LOCKED VMAs that map the page.  This use of the link field for a count
-prevented the management of the pages on an LRU list, and thus mlocked pages
-were not migratable as isolate_lru_page() could not find them, and the LRU list
-link field was not available to the migration subsystem.
+of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
+earlier).  But this use of the link field for a count prevented the management
+of the pages on an LRU list, and thus mlocked pages were not migratable as
+isolate_lru_page() could not detect them, and the LRU list link field was not
+available to the migration subsystem.
 
-Nick resolved this by putting mlocked pages back on the lru list before
+Nick resolved this by putting mlocked pages back on the LRU list before
 attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs.  When
 Nick's patch was integrated with the Unevictable LRU work, the count was
-replaced by walking the reverse map to determine whether any VM_LOCKED VMAs
-mapped the page.  More on this below.
+replaced by walking the reverse map when munlocking, to determine whether any
+other VM_LOCKED VMAs still mapped the page.
+
+However, walking the reverse map for each page when munlocking was ugly and
+inefficient, and could lead to catastrophic contention on a file's rmap lock,
+when many processes which had it mlocked were trying to exit.  In 5.18, the
+idea of keeping mlock_count in Unevictable LRU list link field was revived and
+put to work, without preventing the migration of mlocked pages.  This is why
+the "Unevictable LRU list" cannot be a linked list of pages now; but there was
+no use for that linked list anyway - though its size is maintained for meminfo.
 
 
 Basic Management
@@ -250,22 +256,18 @@ PageMlocked() functions.
 A PG_mlocked page will be placed on the unevictable list when it is added to
 the LRU.  Such pages can be "noticed" by memory management in several places:
 
- (1) in the mlock()/mlockall() system call handlers;
+ (1) in the mlock()/mlock2()/mlockall() system call handlers;
 
  (2) in the mmap() system call handler when mmapping a region with the
      MAP_LOCKED flag;
 
  (3) mmapping a region in a task that has called mlockall() with the MCL_FUTURE
-     flag
+     flag;
 
- (4) in the fault path, if mlocked pages are "culled" in the fault path,
-     and when a VM_LOCKED stack segment is expanded; or
+ (4) in the fault path and when a VM_LOCKED stack segment is expanded; or
 
  (5) as mentioned above, in vmscan:shrink_page_list() when attempting to
-     reclaim a page in a VM_LOCKED VMA via try_to_unmap()
-
-all of which result in the VM_LOCKED flag being set for the VMA if it doesn't
-already have it set.
+     reclaim a page in a VM_LOCKED VMA by page_referenced() or try_to_unmap().
 
 mlocked pages become unlocked and rescued from the unevictable list when:
 
@@ -280,51 +282,53 @@ mlocked pages become unlocked and rescued from the unevictable list when:
  (4) before a page is COW'd in a VM_LOCKED VMA.
 
 
-mlock()/mlockall() System Call Handling
----------------------------------------
+mlock()/mlock2()/mlockall() System Call Handling
+------------------------------------------------
 
-Both [do\_]mlock() and [do\_]mlockall() system call handlers call mlock_fixup()
+mlock(), mlock2() and mlockall() system call handlers proceed to mlock_fixup()
 for each VMA in the range specified by the call.  In the case of mlockall(),
 this is the entire active address space of the task.  Note that mlock_fixup()
 is used for both mlocking and munlocking a range of memory.  A call to mlock()
-an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED is
-treated as a no-op, and mlock_fixup() simply returns.
+an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED, is
+treated as a no-op and mlock_fixup() simply returns.
 
-If the VMA passes some filtering as described in "Filtering Special Vmas"
+If the VMA passes some filtering as described in "Filtering Special VMAs"
 below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
-off a subset of the VMA if the range does not cover the entire VMA.  Once the
-VMA has been merged or split or neither, mlock_fixup() will call
-populate_vma_page_range() to fault in the pages via get_user_pages() and to
-mark the pages as mlocked via mlock_vma_page().
+off a subset of the VMA if the range does not cover the entire VMA.  Any pages
+already present in the VMA are then marked as mlocked by mlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range().
+
+Before returning from the system call, do_mlock() or mlockall() will call
+__mm_populate() to fault in the remaining pages via get_user_pages() and to
+mark those pages as mlocked as they are faulted.
 
 Note that the VMA being mlocked might be mapped with PROT_NONE.  In this case,
 get_user_pages() will be unable to fault in the pages.  That's okay.  If pages
-do end up getting faulted into this VM_LOCKED VMA, we'll handle them in the
-fault path or in vmscan.
-
-Also note that a page returned by get_user_pages() could be truncated or
-migrated out from under us, while we're trying to mlock it.  To detect this,
-populate_vma_page_range() checks page_mapping() after acquiring the page lock.
-If the page is still associated with its mapping, we'll go ahead and call
-mlock_vma_page().  If the mapping is gone, we just unlock the page and move on.
-In the worst case, this will result in a page mapped in a VM_LOCKED VMA
-remaining on a normal LRU list without being PageMlocked().  Again, vmscan will
-detect and cull such pages.
-
-mlock_vma_page() will call TestSetPageMlocked() for each page returned by
-get_user_pages().  We use TestSetPageMlocked() because the page might already
-be mlocked by another task/VMA and we don't want to do extra work.  We
-especially do not want to count an mlocked page more than once in the
-statistics.  If the page was already mlocked, mlock_vma_page() need do nothing
-more.
-
-If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the
-page from the LRU, as it is likely on the appropriate active or inactive list
-at that time.  If the isolate_lru_page() succeeds, mlock_vma_page() will put
-back the page - by calling putback_lru_page() - which will notice that the page
-is now mlocked and divert the page to the node's unevictable list.  If
-mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle
-it later if and when it attempts to reclaim the page.
+do end up getting faulted into this VM_LOCKED VMA, they will be handled in the
+fault path - which is also how mlock2()'s MLOCK_ONFAULT areas are handled.
+
+For each PTE (or PMD) being faulted into a VMA, the page add rmap function
+calls mlock_vma_page(), which calls mlock_page() when the VMA is VM_LOCKED
+(unless it is a PTE mapping of a part of a transparent huge page).  Or when
+it is a newly allocated anonymous page, lru_cache_add_inactive_or_unevictable()
+calls mlock_new_page() instead: similar to mlock_page(), but can make better
+judgments, since this page is held exclusively and known not to be on LRU yet.
+
+mlock_page() sets PageMlocked immediately, then places the page on the CPU's
+mlock pagevec, to batch up the rest of the work to be done under lru_lock by
+__mlock_page().  __mlock_page() sets PageUnevictable, initializes mlock_count
+and moves the page to unevictable state ("the unevictable LRU", but with
+mlock_count in place of LRU threading).  Or if the page was already PageLRU
+and PageUnevictable and PageMlocked, it simply increments the mlock_count.
+
+But in practice that may not work ideally: the page may not yet be on an LRU, or
+it may have been temporarily isolated from LRU.  In such cases the mlock_count
+field cannot be touched, but will be set to 0 later when __pagevec_lru_add_fn()
+returns the page to "LRU".  Races prohibit mlock_count from being set to 1 then:
+rather than risk stranding a page indefinitely as unevictable, always err with
+mlock_count on the low side, so that when munlocked the page will be rescued to
+an evictable LRU, then perhaps be mlocked again later if vmscan finds it in a
+VM_LOCKED VMA.
 
 
 Filtering Special VMAs
@@ -339,68 +343,48 @@ mlock_fixup() filters several classes of "special" VMAs:
    so there is no sense in attempting to visit them.
 
 2) VMAs mapping hugetlbfs page are already effectively pinned into memory.  We
-   neither need nor want to mlock() these pages.  However, to preserve the
-   prior behavior of mlock() - before the unevictable/mlock changes -
-   mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to
-   allocate the huge pages and populate the ptes.
+   neither need nor want to mlock() these pages.  But __mm_populate() includes
+   hugetlbfs ranges, allocating the huge pages and populating the PTEs.
 
 3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages,
-   such as the VDSO page, relay channel pages, etc. These pages
-   are inherently unevictable and are not managed on the LRU lists.
-   mlock_fixup() treats these VMAs the same as hugetlbfs VMAs.  It calls
-   make_pages_present() to populate the ptes.
+   such as the VDSO page, relay channel pages, etc.  These pages are inherently
+   unevictable and are not managed on the LRU lists.  __mm_populate() includes
+   these ranges, populating the PTEs if not already populated.
+
+4) VMAs with VM_MIXEDMAP set are not marked VM_LOCKED, but __mm_populate()
+   includes these ranges, populating the PTEs if not already populated.
 
 Note that for all of these special VMAs, mlock_fixup() does not set the
 VM_LOCKED flag.  Therefore, we won't have to deal with them later during
 munlock(), munmap() or task exit.  Neither does mlock_fixup() account these
 VMAs against the task's "locked_vm".
 
-.. _munlock_munlockall_handling:
 
 munlock()/munlockall() System Call Handling
 -------------------------------------------
 
-The munlock() and munlockall() system calls are handled by the same functions -
-do_mlock[all]() - as the mlock() and mlockall() system calls with the unlock vs
-lock operation indicated by an argument.  So, these system calls are also
-handled by mlock_fixup().  Again, if called for an already munlocked VMA,
-mlock_fixup() simply returns.  Because of the VMA filtering discussed above,
-VM_LOCKED will not be set in any "special" VMAs.  So, these VMAs will be
-ignored for munlock.
+The munlock() and munlockall() system calls are handled by the same
+mlock_fixup() function as mlock(), mlock2() and mlockall() system calls are.
+If called to munlock an already munlocked VMA, mlock_fixup() simply returns.
+Because of the VMA filtering discussed above, VM_LOCKED will not be set in
+any "special" VMAs.  So, those VMAs will be ignored for munlock.
 
 If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
-specified range.  The range is then munlocked via the function
-populate_vma_page_range() - the same function used to mlock a VMA range -
-passing a flag to indicate that munlock() is being performed.
-
-Because the VMA access protections could have been changed to PROT_NONE after
-faulting in and mlocking pages, get_user_pages() was unreliable for visiting
-these pages for munlocking.  Because we don't want to leave pages mlocked,
-get_user_pages() was enhanced to accept a flag to ignore the permissions when
-fetching the pages - all of which should be resident as a result of previous
-mlocking.
-
-For munlock(), populate_vma_page_range() unlocks individual pages by calling
-munlock_vma_page().  munlock_vma_page() unconditionally clears the PG_mlocked
-flag using TestClearPageMlocked().  As with mlock_vma_page(),
-munlock_vma_page() use the Test*PageMlocked() function to handle the case where
-the page might have already been unlocked by another task.  If the page was
-mlocked, munlock_vma_page() updates that zone statistics for the number of
-mlocked pages.  Note, however, that at this point we haven't checked whether
-the page is mapped by other VM_LOCKED VMAs.
-
-We can't call page_mlock(), the function that walks the reverse map to
-check for other VM_LOCKED VMAs, without first isolating the page from the LRU.
-page_mlock() is a variant of try_to_unmap() and thus requires that the page
-not be on an LRU list [more on these below].  However, the call to
-isolate_lru_page() could fail, in which case we can't call page_mlock().  So,
-we go ahead and clear PG_mlocked up front, as this might be the only chance we
-have.  If we can successfully isolate the page, we go ahead and call
-page_mlock(), which will restore the PG_mlocked flag and update the zone
-page statistics if it finds another VMA holding the page mlocked.  If we fail
-to isolate the page, we'll have left a potentially mlocked page on the LRU.
-This is fine, because we'll catch it later if and if vmscan tries to reclaim
-the page.  This should be relatively rare.
+specified range.  All pages in the VMA are then munlocked by munlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range() - the same
+function used when mlocking a VMA range, with new flags for the VMA indicating
+that it is munlock() being performed.
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page().  __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it.  In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
 
 
 Migrating MLOCKED Pages
@@ -410,33 +394,38 @@ A page that is being migrated has been isolated from the LRU lists and is held
 locked across unmapping of the page, updating the page's address space entry
 and copying the contents and state, until the page table entry has been
 replaced with an entry that refers to the new page.  Linux supports migration
-of mlocked pages and other unevictable pages.  This involves simply moving the
-PG_mlocked and PG_unevictable states from the old page to the new page.
+of mlocked pages and other unevictable pages.  PG_mlocked is cleared from
+the old page when it is unmapped from the last VM_LOCKED VMA, and set when the
+new page is mapped in place of migration entry in a VM_LOCKED VMA.  If the page
+was unevictable because mlocked, PG_unevictable follows PG_mlocked; but if the
+page was unevictable for other reasons, PG_unevictable is copied explicitly.
 
 Note that page migration can race with mlocking or munlocking of the same page.
-This has been discussed from the mlock/munlock perspective in the respective
-sections above.  Both processes (migration and m[un]locking) hold the page
-locked.  This provides the first level of synchronization.  Page migration
-zeros out the page_mapping of the old page before unlocking it, so m[un]lock
-can skip these pages by testing the page mapping under page lock.
+There is mostly no problem since page migration requires unmapping all PTEs of
+the old page (including munlock where VM_LOCKED), then mapping in the new page
+(including mlock where VM_LOCKED).  The page table locks provide sufficient
+synchronization.
 
-To complete page migration, we place the new and old pages back onto the LRU
-after dropping the page lock.  The "unneeded" page - old page on success, new
-page on failure - will be freed when the reference count held by the migration
-process is released.  To ensure that we don't strand pages on the unevictable
-list because of a race between munlock and migration, page migration uses the
-putback_lru_page() function to add migrated pages back to the LRU.
+However, since mlock_vma_pages_range() starts by setting VM_LOCKED on a VMA,
+before mlocking any pages already present, if one of those pages were migrated
+before mlock_pte_range() reached it, it would get counted twice in mlock_count.
+To prevent that, mlock_vma_pages_range() temporarily marks the VMA as VM_IO,
+so that mlock_vma_page() will skip it.
+
+To complete page migration, we place the old and new pages back onto the LRU
+afterwards.  The "unneeded" page - old page on success, new page on failure -
+is freed when the reference count held by the migration process is released.
 
 
 Compacting MLOCKED Pages
 ------------------------
 
-The unevictable LRU can be scanned for compactable regions and the default
-behavior is to do so.  /proc/sys/vm/compact_unevictable_allowed controls
-this behavior (see Documentation/admin-guide/sysctl/vm.rst).  Once scanning of the
-unevictable LRU is enabled, the work of compaction is mostly handled by
-the page migration code and the same work flow as described in MIGRATING
-MLOCKED PAGES will apply.
+The memory map can be scanned for compactable regions and the default behavior
+is to let unevictable pages be moved.  /proc/sys/vm/compact_unevictable_allowed
+controls this behavior (see Documentation/admin-guide/sysctl/vm.rst).  The work
+of compaction is mostly handled by the page migration code and the same work
+flow as described in Migrating MLOCKED Pages will apply.
+
 
 MLOCKING Transparent Huge Pages
 -------------------------------
@@ -445,51 +434,44 @@ A transparent huge page is represented by a single entry on an LRU list.
 Therefore, we can only make unevictable an entire compound page, not
 individual subpages.
 
-If a user tries to mlock() part of a huge page, we want the rest of the
-page to be reclaimable.
+If a user tries to mlock() part of a huge page, and no user mlock()s the
+whole of the huge page, we want the rest of the page to be reclaimable.
 
 We cannot just split the page on partial mlock() as split_huge_page() can
-fail and new intermittent failure mode for the syscall is undesirable.
+fail and new intermittent failure mode for the syscall is undesirable.
 
-We handle this by keeping PTE-mapped huge pages on normal LRU lists: the
-PMD on border of VM_LOCKED VMA will be split into PTE table.
+We handle this by keeping PTE-mlocked huge pages on evictable LRU lists:
+the PMD on the border of a VM_LOCKED VMA will be split into a PTE table.
 
-This way the huge page is accessible for vmscan. Under memory pressure the
+This way the huge page is accessible for vmscan.  Under memory pressure the
 page will be split, subpages which belong to VM_LOCKED VMAs will be moved
-to unevictable LRU and the rest can be reclaimed.
+to the unevictable LRU and the rest can be reclaimed.
+
+/proc/meminfo's Unevictable and Mlocked amounts do not include those parts
+of a transparent huge page which are mapped only by PTEs in VM_LOCKED VMAs.
 
-See also comment in follow_trans_huge_pmd().
 
 mmap(MAP_LOCKED) System Call Handling
 -------------------------------------
 
-In addition the mlock()/mlockall() system calls, an application can request
-that a region of memory be mlocked supplying the MAP_LOCKED flag to the mmap()
-call. There is one important and subtle difference here, though. mmap() + mlock()
-will fail if the range cannot be faulted in (e.g. because mm_populate fails)
-and returns with ENOMEM while mmap(MAP_LOCKED) will not fail. The mmaped
-area will still have properties of the locked area - aka. pages will not get
-swapped out - but major page faults to fault memory in might still happen.
+In addition to the mlock(), mlock2() and mlockall() system calls, an application
+can request that a region of memory be mlocked by supplying the MAP_LOCKED flag
+to the mmap() call.  There is one important and subtle difference here, though.
+mmap() + mlock() will fail if the range cannot be faulted in (e.g. because
+mm_populate fails) and returns with ENOMEM while mmap(MAP_LOCKED) will not fail.
+The mmaped area will still have properties of the locked area - pages will not
+get swapped out - but major page faults to fault memory in might still happen.
 
-Furthermore, any mmap() call or brk() call that expands the heap by a
-task that has previously called mlockall() with the MCL_FUTURE flag will result
+Furthermore, any mmap() call or brk() call that expands the heap by a task
+that has previously called mlockall() with the MCL_FUTURE flag will result
 in the newly mapped memory being mlocked.  Before the unevictable/mlock
-changes, the kernel simply called make_pages_present() to allocate pages and
-populate the page table.
+changes, the kernel simply called make_pages_present() to allocate pages
+and populate the page table.
 
-To mlock a range of memory under the unevictable/mlock infrastructure, the
-mmap() handler and task address space expansion functions call
+To mlock a range of memory under the unevictable/mlock infrastructure,
+the mmap() handler and task address space expansion functions call
 populate_vma_page_range() specifying the vma and the address range to mlock.
 
-The callers of populate_vma_page_range() will have already added the memory range
-to be mlocked to the task's "locked_vm".  To account for filtered VMAs,
-populate_vma_page_range() returns the number of pages NOT mlocked.  All of the
-callers then subtract a non-negative return value from the task's locked_vm.  A
-negative return value represent an error - for example, from get_user_pages()
-attempting to fault in a VMA with PROT_NONE access.  In this case, we leave the
-memory range accounted as locked_vm, as the protections could be changed later
-and pages allocated into that region.
-
 
 munmap()/exit()/exec() System Call Handling
 -------------------------------------------
@@ -500,81 +482,53 @@ munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages.
 Before the unevictable/mlock changes, mlocking did not mark the pages in any
 way, so unmapping them required no processing.
 
-To munlock a range of memory under the unevictable/mlock infrastructure, the
-munmap() handler and task address space call tear down function
-munlock_vma_pages_all().  The name reflects the observation that one always
-specifies the entire VMA range when munlock()ing during unmap of a region.
-Because of the VMA filtering when mlocking() regions, only "normal" VMAs that
-actually contain mlocked pages will be passed to munlock_vma_pages_all().
-
-munlock_vma_pages_all() clears the VM_LOCKED VMA flag and, like mlock_fixup()
-for the munlock case, calls __munlock_vma_pages_range() to walk the page table
-for the VMA's memory range and munlock_vma_page() each resident page mapped by
-the VMA.  This effectively munlocks the page, only if this is the last
-VM_LOCKED VMA that maps the page.
-
-
-try_to_unmap()
---------------
-
-Pages can, of course, be mapped into multiple VMAs.  Some of these VMAs may
-have VM_LOCKED flag set.  It is possible for a page mapped into one or more
-VM_LOCKED VMAs not to have the PG_mlocked flag set and therefore reside on one
-of the active or inactive LRU lists.  This could happen if, for example, a task
-in the process of munlocking the page could not isolate the page from the LRU.
-As a result, vmscan/shrink_page_list() might encounter such a page as described
-in section "vmscan's handling of unevictable pages".  To handle this situation,
-try_to_unmap() checks for VM_LOCKED VMAs while it is walking a page's reverse
-map.
-
-try_to_unmap() is always called, by either vmscan for reclaim or for page
-migration, with the argument page locked and isolated from the LRU.  Separate
-functions handle anonymous and mapped file and KSM pages, as these types of
-pages have different reverse map lookup mechanisms, with different locking.
-In each case, whether rmap_walk_anon() or rmap_walk_file() or rmap_walk_ksm(),
-it will call try_to_unmap_one() for every VMA which might contain the page.
-
-When trying to reclaim, if try_to_unmap_one() finds the page in a VM_LOCKED
-VMA, it will then mlock the page via mlock_vma_page() instead of unmapping it,
-and return SWAP_MLOCK to indicate that the page is unevictable: and the scan
-stops there.
-
-mlock_vma_page() is called while holding the page table's lock (in addition
-to the page lock, and the rmap lock): to serialize against concurrent mlock or
-munlock or munmap system calls, mm teardown (munlock_vma_pages_all), reclaim,
-holepunching, and truncation of file pages and their anonymous COWed pages.
-
-
-page_mlock() Reverse Map Scan
----------------------------------
-
-When munlock_vma_page() [see section :ref:`munlock()/munlockall() System Call
-Handling <munlock_munlockall_handling>` above] tries to munlock a
-page, it needs to determine whether or not the page is mapped by any
-VM_LOCKED VMA without actually attempting to unmap all PTEs from the
-page.  For this purpose, the unevictable/mlock infrastructure
-introduced a variant of try_to_unmap() called page_mlock().
-
-page_mlock() walks the respective reverse maps looking for VM_LOCKED VMAs. When
-such a VMA is found the page is mlocked via mlock_vma_page(). This undoes the
-pre-clearing of the page's PG_mlocked done by munlock_vma_page.
-
-Note that page_mlock()'s reverse map walk must visit every VMA in a page's
-reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA.
-However, the scan can terminate when it encounters a VM_LOCKED VMA.
-Although page_mlock() might be called a great many times when munlocking a
-large region or tearing down a large address space that has been mlocked via
-mlockall(), overall this is a fairly rare event.
+For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page().  __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it.  In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
+
+
+Truncating MLOCKED Pages
+------------------------
+
+File truncation or hole punching forcibly unmaps the deleted pages from
+userspace; truncation even unmaps and deletes any private anonymous pages
+which had been Copied-On-Write from the file pages now being truncated.
+
+Mlocked pages can be munlocked and deleted in this way: like with munmap(),
+for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+However, if there is a racing munlock(), since mlock_vma_pages_range() starts
+munlocking by clearing VM_LOCKED from a VMA, before munlocking all the pages
+present, if one of those pages were unmapped by truncation or hole punch before
+mlock_pte_range() reached it, it would not be recognized as mlocked by this VMA,
+and would not be counted out of mlock_count.  In this rare case, a page may
+still appear as PageMlocked after it has been fully unmapped: and it is left to
+release_pages() (or __page_cache_release()) to clear it and update statistics
+before freeing (this event is counted in /proc/vmstat unevictable_pgs_cleared,
+which is usually 0).
 
 
 Page Reclaim in shrink_*_list()
 -------------------------------
 
-shrink_active_list() culls any obviously unevictable pages - i.e.
-!page_evictable(page) - diverting these to the unevictable list.
+vmscan's shrink_active_list() culls any obviously unevictable pages -
+i.e. !page_evictable(page) pages - diverting those to the unevictable list.
 However, shrink_active_list() only sees unevictable pages that made it onto the
-active/inactive lru lists.  Note that these pages do not have PageUnevictable
-set - otherwise they would be on the unevictable list and shrink_active_list
+active/inactive LRU lists.  Note that these pages do not have PageUnevictable
+set - otherwise they would be on the unevictable list and shrink_active_list()
 would never see them.
 
 Some examples of these unevictable pages on the LRU lists are:
@@ -586,20 +540,15 @@ Some examples of these unevictable pages on the LRU lists are:
      when an application accesses the page the first time after SHM_LOCK'ing
      the segment.
 
- (3) mlocked pages that could not be isolated from the LRU and moved to the
-     unevictable list in mlock_vma_page().
-
-shrink_inactive_list() also diverts any unevictable pages that it finds on the
-inactive lists to the appropriate node's unevictable list.
+ (3) pages still mapped into VM_LOCKED VMAs, which should be marked mlocked,
+     but events left mlock_count too low, so they were munlocked too early.
 
-shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd
-after shrink_active_list() had moved them to the inactive list, or pages mapped
-into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to
-recheck via page_mlock().  shrink_inactive_list() won't notice the latter,
-but will pass on to shrink_page_list().
+vmscan's shrink_inactive_list() and shrink_page_list() also divert obviously
+unevictable pages found on the inactive lists to the appropriate memory cgroup
+and node unevictable list.
 
-shrink_page_list() again culls obviously unevictable pages that it could
-encounter for similar reason to shrink_inactive_list().  Pages mapped into
-VM_LOCKED VMAs but without PG_mlocked set will make it all the way to
-try_to_unmap().  shrink_page_list() will divert them to the unevictable list
-when try_to_unmap() returns SWAP_MLOCK, as discussed above.
+rmap's page_referenced_one(), called via vmscan's shrink_active_list() or
+shrink_page_list(), and rmap's try_to_unmap_one() called via shrink_page_list(),
+check for (3) pages still mapped into VM_LOCKED VMAs, and call mlock_vma_page()
+to correct them.  Such pages are culled to the unevictable list when released
+by the shrinker.
index 3993035..9c2b2dd 100644 (file)
@@ -1833,6 +1833,7 @@ C:        irc://irc.oftc.net/asahi-dev
 T:     git https://github.com/AsahiLinux/linux.git
 F:     Documentation/devicetree/bindings/arm/apple.yaml
 F:     Documentation/devicetree/bindings/arm/apple/*
+F:     Documentation/devicetree/bindings/clock/apple,nco.yaml
 F:     Documentation/devicetree/bindings/i2c/apple,i2c.yaml
 F:     Documentation/devicetree/bindings/interrupt-controller/apple,*
 F:     Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
@@ -1841,6 +1842,7 @@ F:        Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
 F:     Documentation/devicetree/bindings/power/apple*
 F:     Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
 F:     arch/arm64/boot/dts/apple/
+F:     drivers/clk/clk-apple-nco.c
 F:     drivers/i2c/busses/i2c-pasemi-core.c
 F:     drivers/i2c/busses/i2c-pasemi-platform.c
 F:     drivers/irqchip/irq-apple-aic.c
@@ -5155,6 +5157,20 @@ S:       Supported
 F:     drivers/cpuidle/cpuidle-psci.h
 F:     drivers/cpuidle/cpuidle-psci-domain.c
 
+CPUIDLE DRIVER - DT IDLE PM DOMAIN
+M:     Ulf Hansson <ulf.hansson@linaro.org>
+L:     linux-pm@vger.kernel.org
+S:     Supported
+F:     drivers/cpuidle/dt_idle_genpd.c
+F:     drivers/cpuidle/dt_idle_genpd.h
+
+CPUIDLE DRIVER - RISC-V SBI
+M:     Anup Patel <anup@brainfault.org>
+L:     linux-pm@vger.kernel.org
+L:     linux-riscv@lists.infradead.org
+S:     Maintained
+F:     drivers/cpuidle/cpuidle-riscv-sbi.c
+
 CRAMFS FILESYSTEM
 M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Maintained
@@ -9515,6 +9531,12 @@ M:       Stanislaw Gruszka <stf_xl@wp.pl>
 S:     Maintained
 F:     drivers/usb/atm/ueagle-atm.c
 
+IMAGIS TOUCHSCREEN DRIVER
+M:     Markuss Broks <markuss.broks@gmail.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
+F:     drivers/input/touchscreen/imagis.c
+
 IMGTEC ASCII LCD DRIVER
 M:     Paul Burton <paulburton@kernel.org>
 S:     Maintained
@@ -11121,17 +11143,6 @@ F:     drivers/ata/
 F:     include/linux/ata.h
 F:     include/linux/libata.h
 
-LIBNVDIMM BLK: MMIO-APERTURE DRIVER
-M:     Dan Williams <dan.j.williams@intel.com>
-M:     Vishal Verma <vishal.l.verma@intel.com>
-M:     Dave Jiang <dave.jiang@intel.com>
-L:     nvdimm@lists.linux.dev
-S:     Supported
-Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
-P:     Documentation/nvdimm/maintainer-entry-profile.rst
-F:     drivers/nvdimm/blk.c
-F:     drivers/nvdimm/region_devs.c
-
 LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
@@ -13662,6 +13673,7 @@ B:      mailto:netdev@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:     Documentation/networking/
+F:     Documentation/process/maintainer-netdev.rst
 F:     include/linux/in.h
 F:     include/linux/net.h
 F:     include/linux/netdevice.h
@@ -14076,7 +14088,10 @@ M:     Abel Vesa <abel.vesa@nxp.com>
 L:     linux-clk@vger.kernel.org
 L:     linux-imx@nxp.com
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/abelvesa/linux.git clk/imx
+F:     Documentation/devicetree/bindings/clock/imx*
 F:     drivers/clk/imx/
+F:     include/dt-bindings/clock/imx*
 
 NXP i.MX 8MQ DCSS DRIVER
 M:     Laurentiu Palcu <laurentiu.palcu@oss.nxp.com>
@@ -14627,6 +14642,12 @@ L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     drivers/char/hw_random/optee-rng.c
 
+OP-TEE RTC DRIVER
+M:     Clément Léger <clement.leger@bootlin.com>
+L:     linux-rtc@vger.kernel.org
+S:     Maintained
+F:     drivers/rtc/rtc-optee.c
+
 OPA-VNIC DRIVER
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
@@ -18734,12 +18755,12 @@ M:    Ion Badulescu <ionut@badula.org>
 S:     Odd Fixes
 F:     drivers/net/ethernet/adaptec/starfire*
 
-STARFIVE JH7100 CLOCK DRIVER
+STARFIVE JH7100 CLOCK DRIVERS
 M:     Emil Renner Berthing <kernel@esmil.dk>
 S:     Maintained
-F:     Documentation/devicetree/bindings/clock/starfive,jh7100-clkgen.yaml
-F:     drivers/clk/starfive/clk-starfive-jh7100.c
-F:     include/dt-bindings/clock/starfive-jh7100.h
+F:     Documentation/devicetree/bindings/clock/starfive,jh7100-*.yaml
+F:     drivers/clk/starfive/clk-starfive-jh7100*
+F:     include/dt-bindings/clock/starfive-jh7100*.h
 
 STARFIVE JH7100 PINCTRL DRIVER
 M:     Emil Renner Berthing <kernel@esmil.dk>
@@ -20544,14 +20565,15 @@ F:    Documentation/admin-guide/media/zr364xx*
 F:     drivers/media/usb/zr364xx/
 
 USER-MODE LINUX (UML)
-M:     Jeff Dike <jdike@addtoit.com>
 M:     Richard Weinberger <richard@nod.at>
 M:     Anton Ivanov <anton.ivanov@cambridgegreys.com>
+M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-um@lists.infradead.org
 S:     Maintained
 W:     http://user-mode-linux.sourceforge.net
 Q:     https://patchwork.ozlabs.org/project/linux-um/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux.git next
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux.git fixes
 F:     Documentation/virt/uml/
 F:     arch/um/
 F:     arch/x86/um/
index c28c5d9..18ecb49 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -424,19 +424,26 @@ HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
 HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
 ifneq ($(LLVM),)
-HOSTCC = clang
-HOSTCXX        = clang++
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+HOSTCC = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+HOSTCXX        = $(LLVM_PREFIX)clang++$(LLVM_SUFFIX)
 else
 HOSTCC = gcc
 HOSTCXX        = g++
 endif
 
-export KBUILD_USERCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
-                             -O2 -fomit-frame-pointer -std=gnu11 \
-                             -Wdeclaration-after-statement
-export KBUILD_USERLDFLAGS :=
+KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+                        -O2 -fomit-frame-pointer -std=gnu11 \
+                        -Wdeclaration-after-statement
+KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+KBUILD_USERLDFLAGS := $(USERLDFLAGS)
 
-KBUILD_HOSTCFLAGS   := $(KBUILD_USERCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
+KBUILD_HOSTCFLAGS   := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
 KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
 KBUILD_HOSTLDFLAGS  := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
 KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
@@ -444,14 +451,14 @@ KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
 # Make variables (CC, etc...)
 CPP            = $(CC) -E
 ifneq ($(LLVM),)
-CC             = clang
-LD             = ld.lld
-AR             = llvm-ar
-NM             = llvm-nm
-OBJCOPY                = llvm-objcopy
-OBJDUMP                = llvm-objdump
-READELF                = llvm-readelf
-STRIP          = llvm-strip
+CC             = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+LD             = $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
+AR             = $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
+NM             = $(LLVM_PREFIX)llvm-nm$(LLVM_SUFFIX)
+OBJCOPY                = $(LLVM_PREFIX)llvm-objcopy$(LLVM_SUFFIX)
+OBJDUMP                = $(LLVM_PREFIX)llvm-objdump$(LLVM_SUFFIX)
+READELF                = $(LLVM_PREFIX)llvm-readelf$(LLVM_SUFFIX)
+STRIP          = $(LLVM_PREFIX)llvm-strip$(LLVM_SUFFIX)
 else
 CC             = $(CROSS_COMPILE)gcc
 LD             = $(CROSS_COMPILE)ld
@@ -531,6 +538,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
 export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
 export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
@@ -1237,8 +1245,8 @@ define filechk_version.h
        echo \#define LINUX_VERSION_SUBLEVEL $(SUBLEVEL)
 endef
 
-$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
-$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
+$(version_h): PATCHLEVEL := $(or $(PATCHLEVEL), 0)
+$(version_h): SUBLEVEL := $(or $(SUBLEVEL), 0)
 $(version_h): FORCE
        $(call filechk,version.h)
 
@@ -1621,7 +1629,7 @@ help:
        @$(MAKE) -f $(srctree)/Documentation/Makefile dochelp
        @echo  ''
        @echo  'Architecture specific targets ($(SRCARCH)):'
-       @$(if $(archhelp),$(archhelp),\
+       @$(or $(archhelp),\
                echo '  No architecture specific help defined for $(SRCARCH)')
        @echo  ''
        @$(if $(boards), \
@@ -1838,7 +1846,7 @@ $(clean-dirs):
 
 clean: $(clean-dirs)
        $(call cmd,rmfiles)
-       @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+       @find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
                \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
                -o -name '*.ko.*' \
                -o -name '*.dtb' -o -name '*.dtbo' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
index 305f741..29b0167 100644 (file)
@@ -164,7 +164,13 @@ config ARCH_USE_BUILTIN_BSWAP
 
 config KRETPROBES
        def_bool y
-       depends on KPROBES && HAVE_KRETPROBES
+       depends on KPROBES && (HAVE_KRETPROBES || HAVE_RETHOOK)
+
+config KRETPROBE_ON_RETHOOK
+       def_bool y
+       depends on HAVE_RETHOOK
+       depends on KRETPROBES
+       select RETHOOK
 
 config USER_RETURN_NOTIFIER
        bool
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 827e887..13e1bdb 100644 (file)
                                reg = <0xb4100000 0x1000>;
                                interrupts = <0 105 0x4>;
                                status = "disabled";
-                               dmas = <&dwdma0 12 0 1>,
-                                       <&dwdma0 13 1 0>;
-                               dma-names = "tx", "rx";
+                               dmas = <&dwdma0 13 0 1>,
+                                       <&dwdma0 12 1 0>;
+                               dma-names = "rx", "tx";
                        };
 
                        thermal@e07008c4 {
index c87b881..9135533 100644 (file)
                                #size-cells = <0>;
                                interrupts = <0 31 0x4>;
                                status = "disabled";
-                               dmas = <&dwdma0 4 0 0>,
-                                       <&dwdma0 5 0 0>;
-                               dma-names = "tx", "rx";
+                               dmas = <&dwdma0 5 0 0>,
+                                       <&dwdma0 4 0 0>;
+                               dma-names = "rx", "tx";
                        };
 
                        rtc@e0580000 {
index 2b57579..e4dba54 100644 (file)
@@ -102,6 +102,8 @@ config CRYPTO_AES_ARM_BS
        depends on KERNEL_MODE_NEON
        select CRYPTO_SKCIPHER
        select CRYPTO_LIB_AES
+       select CRYPTO_AES
+       select CRYPTO_CBC
        select CRYPTO_SIMD
        help
          Use a faster and more secure NEON based implementation of AES in CBC,
index 0659ab4..11677fc 100644 (file)
@@ -59,8 +59,13 @@ static void __init omap_optee_init_check(void)
 u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
                                                         u32 arg3, u32 arg4)
 {
+       static u32 buf[NR_CPUS][5];
+       u32 *param;
+       int cpu;
        u32 ret;
-       u32 param[5];
+
+       cpu = get_cpu();
+       param = buf[cpu];
 
        param[0] = nargs;
        param[1] = arg1;
@@ -76,6 +81,8 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
        outer_clean_range(__pa(param), __pa(param + 5));
        ret = omap_smc2(idx, flag, __pa(param));
 
+       put_cpu();
+
        return ret;
 }
 
@@ -119,8 +126,8 @@ phys_addr_t omap_secure_ram_mempool_base(void)
 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
 u32 omap3_save_secure_ram(void __iomem *addr, int size)
 {
+       static u32 param[5];
        u32 ret;
-       u32 param[5];
 
        if (size != OMAP3_SAVE_SECURE_RAM_SZ)
                return OMAP3_SAVE_SECURE_RAM_SZ;
@@ -153,8 +160,8 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
 u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
                           u32 arg1, u32 arg2, u32 arg3, u32 arg4)
 {
+       static u32 param[5];
        u32 ret;
-       u32 param[5];
 
        param[0] = nargs+1; /* RX-51 needs number of arguments + 1 */
        param[1] = arg1;
index 4b61541..82ffac6 100644 (file)
@@ -381,6 +381,7 @@ out:
  */
 postcore_initcall(atomic_pool_init);
 
+#ifdef CONFIG_CMA_AREAS
 struct dma_contig_early_reserve {
        phys_addr_t base;
        unsigned long size;
@@ -435,6 +436,7 @@ void __init dma_contiguous_remap(void)
                iotable_init(&map, 1);
        }
 }
+#endif
 
 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
 {
index 9ff6836..d7ffccb 100644 (file)
@@ -88,6 +88,10 @@ extern phys_addr_t arm_lowmem_limit;
 
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+#ifdef CONFIG_CMA_AREAS
 void dma_contiguous_remap(void);
+#else
+static inline void dma_contiguous_remap(void) { }
+#endif
 
 unsigned long __clear_cr(unsigned long mask);
index 4a5c50f..81f13bd 100644 (file)
@@ -29,8 +29,7 @@ kapi: $(kapi-hdrs-y) $(gen-y)
 uapi:  $(uapi-hdrs-y)
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \
-          $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+$(shell mkdir -p $(kapi) $(uapi))
 
 quiet_cmd_gen_mach = GEN     $@
       cmd_gen_mach = $(AWK) -f $(real-prereqs) > $@
index 6a60930..68103a8 100644 (file)
@@ -1,4 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
-                       amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
-                       husky.dtb
+dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb
index 8e341be..c290d1c 100644 (file)
@@ -9,6 +9,7 @@
 /dts-v1/;
 
 /include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
 
 / {
        model = "AMD Seattle (Rev.B0) Development Board (Overdrive)";
        status = "ok";
 };
 
-&gpio2 {
-       status = "ok";
-};
-
-&gpio3 {
-       status = "ok";
-};
-
 &gpio4 {
        status = "ok";
 };
        };
 };
 
-&ipmi_kcs {
-       status = "ok";
-};
-
 &smb0 {
        /include/ "amd-seattle-xgbe-b.dtsi"
 };
index 92cef05..e0926f6 100644 (file)
@@ -9,6 +9,7 @@
 /dts-v1/;
 
 /include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
 
 / {
        model = "AMD Seattle (Rev.B1) Development Board (Overdrive)";
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive.dts b/arch/arm64/boot/dts/amd/amd-overdrive.dts
deleted file mode 100644 (file)
index 41b3a6c..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD Seattle Overdrive Development Board
- *
- * Copyright (C) 2014 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
-       model = "AMD Seattle Development Board (Overdrive)";
-       compatible = "amd,seattle-overdrive", "amd,seattle";
-
-       chosen {
-               stdout-path = &serial0;
-       };
-};
-
-&ccp0 {
-       status = "ok";
-};
-
-&gpio0 {
-       status = "ok";
-};
-
-&gpio1 {
-       status = "ok";
-};
-
-&i2c0 {
-       status = "ok";
-};
-
-&pcie0 {
-       status = "ok";
-};
-
-&spi0 {
-       status = "ok";
-};
-
-&spi1 {
-       status = "ok";
-       sdcard0: sdcard@0 {
-               compatible = "mmc-spi-slot";
-               reg = <0>;
-               spi-max-frequency = <20000000>;
-               voltage-ranges = <3200 3400>;
-               gpios = <&gpio0 7 0>;
-               interrupt-parent = <&gpio0>;
-               interrupts = <7 3>;
-               pl022,hierarchy = <0>;
-               pl022,interface = <0>;
-               pl022,com-mode = <0x0>;
-               pl022,rx-level-trig = <0>;
-               pl022,tx-level-trig = <0>;
-       };
-};
-
-&v2m0 {
-       arm,msi-base-spi = <64>;
-       arm,msi-num-spis = <256>;
-};
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi
new file mode 100644 (file)
index 0000000..93688a0
--- /dev/null
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+       cpus {
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+                       cluster1 {
+                               core0 {
+                                       cpu = <&CPU2>;
+                               };
+                               core1 {
+                                       cpu = <&CPU3>;
+                               };
+                       };
+                       cluster2 {
+                               core0 {
+                                       cpu = <&CPU4>;
+                               };
+                               core1 {
+                                       cpu = <&CPU5>;
+                               };
+                       };
+                       cluster3 {
+                               core0 {
+                                       cpu = <&CPU6>;
+                               };
+                               core1 {
+                                       cpu = <&CPU7>;
+                               };
+                       };
+               };
+
+               CPU0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x0>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_0>;
+
+               };
+
+               CPU1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x1>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_0>;
+               };
+
+               CPU2: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x100>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_1>;
+               };
+
+               CPU3: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x101>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_1>;
+               };
+
+               CPU4: cpu@200 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x200>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_2>;
+               };
+
+               CPU5: cpu@201 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x201>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_2>;
+               };
+
+               CPU6: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x300>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_3>;
+               };
+
+               CPU7: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x301>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_3>;
+               };
+       };
+
+       L2_0: l2-cache0 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_1: l2-cache1 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_2: l2-cache2 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_3: l2-cache3 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L3: l3-cache {
+               cache-level = <3>;
+               cache-size = <0x800000>;
+               cache-line-size = <64>;
+               cache-sets = <8192>;
+               cache-unified;
+       };
+
+       pmu {
+               compatible = "arm,cortex-a57-pmu";
+               interrupts = <0x0 0x7 0x4>,
+                            <0x0 0x8 0x4>,
+                            <0x0 0x9 0x4>,
+                            <0x0 0xa 0x4>,
+                            <0x0 0xb 0x4>,
+                            <0x0 0xc 0x4>,
+                            <0x0 0xd 0x4>,
+                            <0x0 0xe 0x4>;
+               interrupt-affinity = <&CPU0>,
+                                    <&CPU1>,
+                                    <&CPU2>,
+                                    <&CPU3>,
+                                    <&CPU4>,
+                                    <&CPU5>,
+                                    <&CPU6>,
+                                    <&CPU7>;
+       };
+};
index b664e7a..6900205 100644 (file)
                             <1 10 0xff04>;
        };
 
-       pmu {
-               compatible = "arm,armv8-pmuv3";
-               interrupts = <0 7 4>,
-                            <0 8 4>,
-                            <0 9 4>,
-                            <0 10 4>,
-                            <0 11 4>,
-                            <0 12 4>,
-                            <0 13 4>,
-                            <0 14 4>;
-       };
-
        smb0: smb {
                compatible = "simple-bus";
                #address-cells = <2>;
@@ -70,6 +58,7 @@
                        reg = <0 0xe0300000 0 0xf0000>;
                        interrupts = <0 355 4>;
                        clocks = <&sataclk_333mhz>;
+                       iommus = <&sata0_smmu 0x0 0x1f>;
                        dma-coherent;
                };
 
                        reg = <0 0xe0d00000 0 0xf0000>;
                        interrupts = <0 354 4>;
                        clocks = <&sataclk_333mhz>;
+                       iommus = <&sata1_smmu 0x0e>,
+                                <&sata1_smmu 0x0f>,
+                                <&sata1_smmu 0x1e>;
+                       dma-coherent;
+               };
+
+               sata0_smmu: iommu@e0200000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0200000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 332 4>, <0 332 4>;
+                       #iommu-cells = <2>;
+                       dma-coherent;
+               };
+
+               sata1_smmu: iommu@e0c00000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0c00000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 331 4>, <0 331 4>;
+                       #iommu-cells = <1>;
                        dma-coherent;
                };
 
                        reg = <0 0xe0100000 0 0x10000>;
                        interrupts = <0 3 4>;
                        dma-coherent;
+                       iommus = <&sata1_smmu 0x00>,
+                                <&sata1_smmu 0x02>,
+                                <&sata1_smmu 0x40>,
+                                <&sata1_smmu 0x42>;
                };
 
                pcie0: pcie@f0000000 {
                        msi-parent = <&v2m0>;
                        reg = <0 0xf0000000 0 0x10000000>;
 
-                       interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+                       interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
                        interrupt-map =
-                               <0x1000 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
-                               <0x1000 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
-                               <0x1000 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
-                               <0x1000 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>;
+                               <0x1100 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
+                               <0x1100 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
+                               <0x1100 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
+                               <0x1100 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>,
+
+                               <0x1200 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x124 0x1>,
+                               <0x1200 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x125 0x1>,
+                               <0x1200 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x126 0x1>,
+                               <0x1200 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x127 0x1>,
+
+                               <0x1300 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x128 0x1>,
+                               <0x1300 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x129 0x1>,
+                               <0x1300 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x12a 0x1>,
+                               <0x1300 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x12b 0x1>;
 
                        dma-coherent;
                        dma-ranges = <0x43000000 0x0 0x0 0x0 0x0 0x100 0x0>;
                                <0x01000000 0x00 0x00000000 0x00 0xefff0000 0x00 0x00010000>,
                                /* 32-bit MMIO (size=2G) */
                                <0x02000000 0x00 0x40000000 0x00 0x40000000 0x00 0x80000000>,
-                               /* 64-bit MMIO (size= 124G) */
+                               /* 64-bit MMIO (size= 508G) */
                                <0x03000000 0x01 0x00000000 0x01 0x00000000 0x7f 0x00000000>;
+                       iommu-map = <0x0 &pcie_smmu 0x0 0x10000>;
+               };
+
+               pcie_smmu: iommu@e0a00000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0a00000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 333 4>, <0 333 4>;
+                       #iommu-cells = <1>;
+                       dma-coherent;
                };
 
                /* Perf CCN504 PMU */
index d974983..9259e54 100644 (file)
@@ -55,7 +55,7 @@
                clocks = <&xgmacclk0_dma_250mhz>, <&xgmacclk0_ptp_250mhz>;
                clock-names = "dma_clk", "ptp_clk";
                phy-mode = "xgmii";
-               #stream-id-cells = <16>;
+               iommus = <&xgmac0_smmu 0x00 0x17>; /* 0-7, 16-23 */
                dma-coherent;
        };
 
                clocks = <&xgmacclk1_dma_250mhz>, <&xgmacclk1_ptp_250mhz>;
                clock-names = "dma_clk", "ptp_clk";
                phy-mode = "xgmii";
-               #stream-id-cells = <16>;
+               iommus = <&xgmac1_smmu 0x00 0x17>; /* 0-7, 16-23 */
                dma-coherent;
        };
 
-       xgmac0_smmu: smmu@e0600000 {
+       xgmac0_smmu: iommu@e0600000 {
                 compatible = "arm,mmu-401";
                 reg = <0 0xe0600000 0 0x10000>;
                 #global-interrupts = <1>;
                               */
                              <0 336 4>,
                              <0 336 4>;
-
-                mmu-masters = <&xgmac0
-                         0  1  2  3  4  5  6  7
-                        16 17 18 19 20 21 22 23
-                >;
+               #iommu-cells = <2>;
+               dma-coherent;
         };
 
-        xgmac1_smmu: smmu@e0800000 {
+        xgmac1_smmu: iommu@e0800000 {
                 compatible = "arm,mmu-401";
                 reg = <0 0xe0800000 0 0x10000>;
                 #global-interrupts = <1>;
                               */
                              <0 335 4>,
                              <0 335 4>;
-
-                mmu-masters = <&xgmac1
-                         0  1  2  3  4  5  6  7
-                        16 17 18 19 20 21 22 23
-                >;
+               #iommu-cells = <2>;
+               dma-coherent;
         };
diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts
deleted file mode 100644 (file)
index 7acde34..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board
- * Note: Based-on AMD Seattle Rev.B0
- *
- * Copyright (C) 2015 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
-       model = "Linaro 96Boards Enterprise Edition Server (Husky) Board";
-       compatible = "amd,seattle-overdrive", "amd,seattle";
-
-       chosen {
-               stdout-path = &serial0;
-       };
-
-       psci {
-               compatible   = "arm,psci-0.2";
-               method       = "smc";
-       };
-};
-
-&ccp0 {
-       status = "ok";
-       amd,zlib-support = <1>;
-};
-
-/**
- * NOTE: In Rev.B, gpio0 is reserved.
- */
-&gpio1 {
-       status = "ok";
-};
-
-&gpio2 {
-       status = "ok";
-};
-
-&gpio3 {
-       status = "ok";
-};
-
-&gpio4 {
-       status = "ok";
-};
-
-&i2c0 {
-       status = "ok";
-};
-
-&i2c1 {
-       status = "ok";
-};
-
-&pcie0 {
-       status = "ok";
-};
-
-&spi0 {
-       status = "ok";
-};
-
-&spi1 {
-       status = "ok";
-       sdcard0: sdcard@0 {
-               compatible = "mmc-spi-slot";
-               reg = <0>;
-               spi-max-frequency = <20000000>;
-               voltage-ranges = <3200 3400>;
-               pl022,hierarchy = <0>;
-               pl022,interface = <0>;
-               pl022,com-mode = <0x0>;
-               pl022,rx-level-trig = <0>;
-               pl022,tx-level-trig = <0>;
-       };
-};
-
-&smb0 {
-       /include/ "amd-seattle-xgbe-b.dtsi"
-};
index 01b01e3..35d1939 100644 (file)
                        clock-names = "i2c";
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(1)>;
-                       dmas = <&edma0 1 39>,
-                              <&edma0 1 38>;
-                       dma-names = "tx", "rx";
+                       dmas = <&edma0 1 38>,
+                              <&edma0 1 39>;
+                       dma-names = "rx", "tx";
                        status = "disabled";
                };
 
index 687fea6..4e7bd04 100644 (file)
                        interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(2)>;
-                       dmas = <&edma0 1 39>,
-                              <&edma0 1 38>;
-                       dma-names = "tx", "rx";
+                       dmas = <&edma0 1 38>,
+                              <&edma0 1 39>;
+                       dma-names = "rx", "tx";
                        status = "disabled";
                };
 
index 14f40ec..d009f92 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index cff570a..2b42c37 100644 (file)
@@ -29,7 +29,7 @@ $(obj)/simpleImage.$(DTB).ub: $(obj)/simpleImage.$(DTB) FORCE
        $(call if_changed,uimage)
 
 $(obj)/simpleImage.$(DTB).unstrip: vmlinux FORCE
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 
 $(obj)/simpleImage.$(DTB).strip: vmlinux FORCE
        $(call if_changed,strip)
index ef00dd3..b84e2cb 100644 (file)
@@ -12,7 +12,7 @@ $(obj)/linked_dtb.o: $(obj)/system.dtb
 # Generate system.dtb from $(DTB).dtb
 ifneq ($(DTB),system)
 $(obj)/system.dtb: $(obj)/$(DTB).dtb
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 endif
 endif
 
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 10bf90d..e6b21de 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syshdr := $(srctree)/scripts/syscallhdr.sh
 sysnr := $(srctree)/$(src)/syscallnr.sh
index 90fc95b..52e550b 100644 (file)
@@ -37,7 +37,7 @@ config PARISC
        select GENERIC_PCI_IOMAP
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_SMP_IDLE_THREAD
-       select GENERIC_CPU_DEVICES
+       select GENERIC_ARCH_TOPOLOGY if SMP
        select GENERIC_LIB_DEVMEM_IS_ALLOWED
        select SYSCTL_ARCH_UNALIGN_ALLOW
        select SYSCTL_EXCEPTION_TRACE
@@ -56,6 +56,7 @@ config PARISC
        select HAVE_ARCH_TRACEHOOK
        select HAVE_REGS_AND_STACK_ACCESS_API
        select GENERIC_SCHED_CLOCK
+       select GENERIC_IRQ_MIGRATION if SMP
        select HAVE_UNSTABLE_SCHED_CLOCK if SMP
        select LEGACY_TIMER_TICK
        select CPU_NO_EFFICIENT_FFS
@@ -279,16 +280,9 @@ config SMP
 
          If you don't know what to do here, say N.
 
-config PARISC_CPU_TOPOLOGY
-       bool "Support cpu topology definition"
-       depends on SMP
-       default y
-       help
-         Support PARISC cpu topology definition.
-
 config SCHED_MC
        bool "Multi-core scheduler support"
-       depends on PARISC_CPU_TOPOLOGY && PA8X00
+       depends on GENERIC_ARCH_TOPOLOGY && PA8X00
        help
          Multi-core scheduler support improves the CPU scheduler's decision
          making when dealing with multi-core CPU chips at a cost of slightly
index 2a9387a..7583fc3 100644 (file)
@@ -42,7 +42,7 @@ export LD_BFD
 
 # Set default 32 bits cross compilers for vdso
 CC_ARCHES_32 = hppa hppa2.0 hppa1.1
-CC_SUFFIXES  = linux linux-gnu unknown-linux-gnu
+CC_SUFFIXES  = linux linux-gnu unknown-linux-gnu suse-linux
 CROSS32_COMPILE := $(call cc-cross-prefix, \
        $(foreach a,$(CC_ARCHES_32), \
        $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
@@ -52,7 +52,7 @@ export CROSS32CC
 # Set default cross compiler for kernel build
 ifdef cross_compiling
        ifeq ($(CROSS_COMPILE),)
-               CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+               CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
                CROSS_COMPILE := $(call cc-cross-prefix, \
                        $(foreach a,$(CC_ARCHES), \
                        $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
index 18b957a..b643092 100644 (file)
@@ -94,6 +94,9 @@ int pdc_sti_call(unsigned long func, unsigned long flags,
                  unsigned long glob_cfg);
 
 int __pdc_cpu_rendezvous(void);
+void pdc_cpu_rendezvous_lock(void);
+void pdc_cpu_rendezvous_unlock(void);
+
 static inline char * os_id_to_string(u16 os_id) {
        switch(os_id) {
        case OS_ID_NONE:        return "No OS";
index 24355ed..8f16037 100644 (file)
@@ -83,6 +83,7 @@
 #define PDC_PAT_CPU_RENDEZVOUS         6L /* Rendezvous CPU */
 #define PDC_PAT_CPU_GET_CLOCK_INFO     7L /* Return CPU Clock info */
 #define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
+#define PDC_PAT_CPU_GET_PDC_ENTRYPOINT 11L /* Return PDC Entry point */
 #define PDC_PAT_CPU_PLUNGE_FABRIC      128L /* Plunge Fabric */
 #define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache 
                                                  * Cleansing Mode */
@@ -356,7 +357,7 @@ struct pdc_pat_cell_mod_maddr_block {       /* PDC_PAT_CELL_MODULE */
 
 typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
 
-
+extern int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry);
 extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
 extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
 extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
index 0063642..4621ceb 100644 (file)
@@ -95,6 +95,7 @@ struct cpuinfo_parisc {
 
 extern struct system_cpuinfo_parisc boot_cpu_data;
 DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
+extern int time_keeper_id;             /* CPU used for timekeeping */
 
 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
 
index 2279ebe..94d1f21 100644 (file)
@@ -44,12 +44,7 @@ static inline void smp_send_all_nop(void) { return; }
 
 #define NO_PROC_ID             0xFF            /* No processor magic marker */
 #define ANY_PROC_ID            0xFF            /* Any processor magic marker */
-static inline int __cpu_disable (void) {
-  return 0;
-}
-static inline void __cpu_die (unsigned int cpu) {
-  while(1)
-    ;
-}
+int __cpu_disable(void);
+void __cpu_die(unsigned int cpu);
 
 #endif /*  __ASM_SMP_H */
index 41b3ddb..c822bd0 100644 (file)
        pa;                                             \
 })
 
+#define CR_EIEM 15     /* External Interrupt Enable Mask */
+#define CR_CR16 16     /* CR16 Interval Timer */
+#define CR_EIRR 23     /* External Interrupt Request Register */
+
 #define mfctl(reg)     ({              \
        unsigned long cr;               \
        __asm__ __volatile__(           \
-               "mfctl " #reg ",%0" :   \
-                "=r" (cr)              \
+               "mfctl %1,%0" :         \
+                "=r" (cr) : "i" (reg)  \
        );                              \
        cr;                             \
 })
                : /* no outputs */ \
                : "r" (gr), "i" (cr) : "memory")
 
-/* these are here to de-mystefy the calling code, and to provide hooks */
-/* which I needed for debugging EIEM problems -PB */
-#define get_eiem() mfctl(15)
-static inline void set_eiem(unsigned long val)
-{
-       mtctl(val, 15);
-}
+#define get_eiem()     mfctl(CR_EIEM)
+#define set_eiem(val)  mtctl(val, CR_EIEM)
 
 #define mfsp(reg)      ({              \
        unsigned long cr;               \
index 6f0750c..406afb3 100644 (file)
@@ -1,33 +1,16 @@
 #ifndef _ASM_PARISC_TOPOLOGY_H
 #define _ASM_PARISC_TOPOLOGY_H
 
-#ifdef CONFIG_PARISC_CPU_TOPOLOGY
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
 
 #include <linux/cpumask.h>
-
-struct cputopo_parisc {
-       int thread_id;
-       int core_id;
-       int socket_id;
-       cpumask_t thread_sibling;
-       cpumask_t core_sibling;
-};
-
-extern struct cputopo_parisc cpu_topology[NR_CPUS];
-
-#define topology_physical_package_id(cpu)      (cpu_topology[cpu].socket_id)
-#define topology_core_id(cpu)          (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu)     (&cpu_topology[cpu].core_sibling)
-#define topology_sibling_cpumask(cpu)  (&cpu_topology[cpu].thread_sibling)
-
-void init_cpu_topology(void);
-void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(int cpu);
+#include <linux/arch_topology.h>
 
 #else
 
 static inline void init_cpu_topology(void) { }
 static inline void store_cpu_topology(unsigned int cpuid) { }
+static inline void reset_cpu_topology(void) { }
 
 #endif
 
index d579243..d0bfac8 100644 (file)
@@ -31,7 +31,7 @@ obj-$(CONFIG_AUDIT)   += audit.o
 obj64-$(CONFIG_AUDIT)  += compat_audit.o
 # only supported for PCX-W/U in 64-bit mode at the moment
 obj-$(CONFIG_64BIT)    += perf.o perf_asm.o $(obj64-y)
-obj-$(CONFIG_PARISC_CPU_TOPOLOGY)      += topology.o
+obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY)    += topology.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
 obj-$(CONFIG_JUMP_LABEL)               += jump_label.o
index 456e879..2334819 100644 (file)
@@ -273,7 +273,7 @@ parisc_cache_init(void)
        }
 }
 
-void __init disable_sr_hashing(void)
+void disable_sr_hashing(void)
 {
        int srhash_type, retval;
        unsigned long space_bits;
@@ -611,8 +611,8 @@ void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
        if (pfn_valid(pfn)) {
-               flush_tlb_page(vma, vmaddr);
                if (likely(vma->vm_mm->context.space_id)) {
+                       flush_tlb_page(vma, vmaddr);
                        __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
                } else {
                        __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
@@ -624,7 +624,6 @@ void flush_kernel_vmap_range(void *vaddr, int size)
 {
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;
-       unsigned long flags, physaddr;
 
        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
@@ -633,14 +632,8 @@ void flush_kernel_vmap_range(void *vaddr, int size)
                return;
        }
 
-       while (start < end) {
-               physaddr = lpa(start);
-               purge_tlb_start(flags);
-               pdtlb(SR_KERNEL, start);
-               purge_tlb_end(flags);
-               flush_dcache_page_asm(physaddr, start);
-               start += PAGE_SIZE;
-       }
+       flush_kernel_dcache_range_asm(start, end);
+       flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
@@ -648,7 +641,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;
-       unsigned long flags, physaddr;
 
        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
@@ -657,13 +649,7 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
                return;
        }
 
-       while (start < end) {
-               physaddr = lpa(start);
-               purge_tlb_start(flags);
-               pdtlb(SR_KERNEL, start);
-               purge_tlb_end(flags);
-               purge_dcache_page_asm(physaddr, start);
-               start += PAGE_SIZE;
-       }
+       purge_kernel_dcache_range_asm(start, end);
+       flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
index 3370e34..6a7e315 100644 (file)
@@ -83,7 +83,7 @@ extern unsigned long pdc_result2[NUM_PDC_RESULT];
 
 /* Firmware needs to be initially set to narrow to determine the 
  * actual firmware width. */
-int parisc_narrow_firmware __ro_after_init = 1;
+int parisc_narrow_firmware __ro_after_init = 2;
 #endif
 
 /* On most currently-supported platforms, IODC I/O calls are 32-bit calls
@@ -174,6 +174,11 @@ void set_firmware_width_unlocked(void)
 void set_firmware_width(void)
 {
        unsigned long flags;
+
+       /* already initialized? */
+       if (parisc_narrow_firmware != 2)
+               return;
+
        spin_lock_irqsave(&pdc_lock, flags);
        set_firmware_width_unlocked();
        spin_unlock_irqrestore(&pdc_lock, flags);
@@ -324,7 +329,44 @@ int __pdc_cpu_rendezvous(void)
                return mem_pdc_call(PDC_PROC, 1, 0);
 }
 
+/**
+ * pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state
+ */
+void pdc_cpu_rendezvous_lock(void)
+{
+       spin_lock(&pdc_lock);
+}
+
+/**
+ * pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state
+ */
+void pdc_cpu_rendezvous_unlock(void)
+{
+       spin_unlock(&pdc_lock);
+}
+
+/**
+ * pdc_pat_get_PDC_entrypoint - Get PDC entry point for current CPU
+ * @retval: -1 on error, 0 on success
+ */
+int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry)
+{
+       int retval = 0;
+       unsigned long flags;
+
+       if (!IS_ENABLED(CONFIG_SMP) || !is_pdc_pat()) {
+               *pdc_entry = MEM_PDC;
+               return 0;
+       }
+
+       spin_lock_irqsave(&pdc_lock, flags);
+       retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_PDC_ENTRYPOINT,
+                       __pa(pdc_result));
+       *pdc_entry = pdc_result[0];
+       spin_unlock_irqrestore(&pdc_lock, flags);
 
+       return retval;
+}
 /**
  * pdc_chassis_warn - Fetches chassis warnings
  * @retval: -1 on error, 0 on success
index b24f777..e0a9e96 100644 (file)
@@ -162,6 +162,15 @@ $pgt_fill_loop:
        /* FALLTHROUGH */
        .procend
 
+#ifdef CONFIG_HOTPLUG_CPU
+       /* common_stext is far away in another section... jump there */
+       load32          PA(common_stext), %rp
+       bv,n            (%rp)
+
+       /* common_stext and smp_slave_stext needs to be in text section */
+       .text
+#endif
+
        /*
        ** Code Common to both Monarch and Slave processors.
        ** Entry:
@@ -371,8 +380,6 @@ smp_slave_stext:
        .procend
 #endif /* CONFIG_SMP */
 
-ENDPROC(parisc_kernel_start)
-
 #ifndef CONFIG_64BIT
        .section .data..ro_after_init
 
index eb18e16..0fe2d79 100644 (file)
@@ -105,28 +105,12 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
        if (irqd_is_per_cpu(d))
                return -EINVAL;
 
-       /* whatever mask they set, we just allow one CPU */
-       cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1),
-                                       dest, cpu_online_mask);
+       cpu_dest = cpumask_first_and(dest, cpu_online_mask);
        if (cpu_dest >= nr_cpu_ids)
-               cpu_dest = cpumask_first_and(dest, cpu_online_mask);
+               cpu_dest = cpumask_first(cpu_online_mask);
 
        return cpu_dest;
 }
-
-static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
-                               bool force)
-{
-       int cpu_dest;
-
-       cpu_dest = cpu_check_affinity(d, dest);
-       if (cpu_dest < 0)
-               return -1;
-
-       cpumask_copy(irq_data_get_affinity_mask(d), dest);
-
-       return 0;
-}
 #endif
 
 static struct irq_chip cpu_interrupt_type = {
@@ -135,9 +119,6 @@ static struct irq_chip cpu_interrupt_type = {
        .irq_unmask             = cpu_unmask_irq,
        .irq_ack                = cpu_ack_irq,
        .irq_eoi                = cpu_eoi_irq,
-#ifdef CONFIG_SMP
-       .irq_set_affinity       = cpu_set_affinity_irq,
-#endif
        /* XXX: Needs to be written.  We managed without it so far, but
         * we really ought to write it.
         */
@@ -582,7 +563,7 @@ static void claim_cpu_irqs(void)
 #endif
 }
 
-void __init init_IRQ(void)
+void init_IRQ(void)
 {
        local_irq_disable();    /* PARANOID - should already be disabled */
        mtctl(~0UL, 23);        /* EIRR : clear all pending external intr */
index b2ba6d6..b4c3f01 100644 (file)
@@ -1264,7 +1264,7 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
        nop
 ENDPROC_CFI(flush_kernel_icache_range_asm)
 
-       __INIT
+       .text
 
        /* align should cover use of rfi in disable_sr_hashing_asm and
         * srdis_done.
index 80a0ab3..e59574f 100644 (file)
@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
 
        *need_unmap = 1;
        set_fixmap(fixmap, page_to_phys(page));
-       if (flags)
-               raw_spin_lock_irqsave(&patch_lock, *flags);
-       else
-               __acquire(&patch_lock);
+       raw_spin_lock_irqsave(&patch_lock, *flags);
 
        return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
 }
@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
 {
        clear_fixmap(fixmap);
 
-       if (flags)
-               raw_spin_unlock_irqrestore(&patch_lock, *flags);
-       else
-               __release(&patch_lock);
+       raw_spin_unlock_irqrestore(&patch_lock, *flags);
 }
 
 void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
        int mapped;
 
        /* Make sure we don't have any aliases in cache */
-       flush_kernel_vmap_range(addr, len);
-       flush_icache_range(start, end);
+       flush_kernel_dcache_range_asm(start, end);
+       flush_kernel_icache_range_asm(start, end);
+       flush_tlb_kernel_range(start, end);
 
        p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
 
@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
                         * We're crossing a page boundary, so
                         * need to remap
                         */
-                       flush_kernel_vmap_range((void *)fixmap,
-                                               (p-fixmap) * sizeof(*p));
+                       flush_kernel_dcache_range_asm((unsigned long)fixmap,
+                                                     (unsigned long)p);
+                       flush_tlb_kernel_range((unsigned long)fixmap,
+                                              (unsigned long)p);
                        if (mapped)
                                patch_unmap(FIX_TEXT_POKE0, &flags);
                        p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
                }
        }
 
-       flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
+       flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
+       flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
        if (mapped)
                patch_unmap(FIX_TEXT_POKE0, &flags);
-       flush_icache_range(start, end);
 }
 
 void __kprobes __patch_text(void *addr, u32 insn)
index 2030c77..28b6a2a 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/rcupdate.h>
 #include <linux/random.h>
 #include <linux/nmi.h>
+#include <linux/sched/hotplug.h>
 
 #include <asm/io.h>
 #include <asm/asm-offsets.h>
@@ -46,6 +47,7 @@
 #include <asm/pdc_chassis.h>
 #include <asm/unwind.h>
 #include <asm/sections.h>
+#include <asm/cacheflush.h>
 
 #define COMMAND_GLOBAL  F_EXTEND(0xfffe0030)
 #define CMD_RESET       5       /* reset any module */
@@ -158,10 +160,29 @@ void release_thread(struct task_struct *dead_task)
 int running_on_qemu __ro_after_init;
 EXPORT_SYMBOL(running_on_qemu);
 
-void __cpuidle arch_cpu_idle_dead(void)
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ */
+void arch_cpu_idle_dead(void)
 {
-       /* nop on real hardware, qemu will offline CPU. */
-       asm volatile("or %%r31,%%r31,%%r31\n":::);
+#ifdef CONFIG_HOTPLUG_CPU
+       idle_task_exit();
+
+       local_irq_disable();
+
+       /* Tell __cpu_die() that this CPU is now safe to dispose of. */
+       (void)cpu_report_death();
+
+       /* Ensure that the cache lines are written out. */
+       flush_cache_all_local();
+       flush_tlb_all_local(NULL);
+
+       /* Let PDC firmware put CPU into firmware idle loop. */
+       __pdc_cpu_rendezvous();
+
+       pr_warn("PDC does not provide rendezvous function.\n");
+#endif
+       while (1);
 }
 
 void __cpuidle arch_cpu_idle(void)
index 1b6129e..d986921 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <asm/topology.h>
 #include <asm/param.h>
 #include <asm/cache.h>
 #include <asm/hardware.h>      /* for register_parisc_driver() stuff */
@@ -317,7 +318,7 @@ void __init collect_boot_cpu_data(void)
  *
  * o Enable CPU profiling hooks.
  */
-int __init init_per_cpu(int cpunum)
+int init_per_cpu(int cpunum)
 {
        int ret;
        struct pdc_coproc_cfg coproc_cfg;
@@ -390,7 +391,7 @@ show_cpuinfo (struct seq_file *m, void *v)
                                 boot_cpu_data.cpu_hz / 1000000,
                                 boot_cpu_data.cpu_hz % 1000000  );
 
-#ifdef CONFIG_PARISC_CPU_TOPOLOGY
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
                seq_printf(m, "physical id\t: %d\n",
                                topology_physical_package_id(cpu));
                seq_printf(m, "siblings\t: %d\n",
@@ -460,5 +461,6 @@ static struct parisc_driver cpu_driver __refdata = {
  */
 void __init processor_init(void)
 {
+       reset_cpu_topology();
        register_parisc_driver(&cpu_driver);
 }
index a32a882..24d0744 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/kgdb.h>
+#include <linux/sched/hotplug.h>
 
 #include <linux/atomic.h>
 #include <asm/current.h>
@@ -60,8 +61,6 @@ volatile struct task_struct *smp_init_current_idle_task;
 /* track which CPU is booting */
 static volatile int cpu_now_booting;
 
-static int parisc_max_cpus = 1;
-
 static DEFINE_PER_CPU(spinlock_t, ipi_lock);
 
 enum ipi_message_type {
@@ -269,7 +268,7 @@ void arch_send_call_function_single_ipi(int cpu)
 /*
  * Called by secondaries to update state and initialize CPU registers.
  */
-static void __init
+static void
 smp_cpu_init(int cpunum)
 {
        extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
@@ -309,7 +308,7 @@ smp_cpu_init(int cpunum)
  * Slaves start using C here. Indirectly called from smp_slave_stext.
  * Do what start_kernel() and main() do for boot strap processor (aka monarch)
  */
-void __init smp_callin(unsigned long pdce_proc)
+void smp_callin(unsigned long pdce_proc)
 {
        int slave_id = cpu_now_booting;
 
@@ -334,11 +333,28 @@ void __init smp_callin(unsigned long pdce_proc)
 /*
  * Bring one cpu online.
  */
-int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
+static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
 {
        const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
        long timeout;
 
+#ifdef CONFIG_HOTPLUG_CPU
+       int i;
+
+       /* reset irq statistics for this CPU */
+       memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
+       for (i = 0; i < NR_IRQS; i++) {
+               struct irq_desc *desc = irq_to_desc(i);
+
+               if (desc && desc->kstat_irqs)
+                       *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
+       }
+#endif
+
+       /* wait until last booting CPU has started. */
+       while (cpu_now_booting)
+               ;
+
        /* Let _start know what logical CPU we're booting
        ** (offset into init_tasks[],cpu_data[])
        */
@@ -374,7 +390,6 @@ int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
                if(cpu_online(cpuid)) {
                        /* Which implies Slave has started up */
                        cpu_now_booting = 0;
-                       smp_init_current_idle_task = NULL;
                        goto alive ;
                }
                udelay(100);
@@ -415,25 +430,88 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                spin_lock_init(&per_cpu(ipi_lock, cpu));
 
        init_cpu_present(cpumask_of(0));
-
-       parisc_max_cpus = max_cpus;
-       if (!max_cpus)
-               printk(KERN_INFO "SMP mode deactivated.\n");
 }
 
 
-void smp_cpus_done(unsigned int cpu_max)
+void __init smp_cpus_done(unsigned int cpu_max)
 {
-       return;
 }
 
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-       if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
-               return -ENOSYS;
+       if (cpu_online(cpu))
+               return 0;
+
+       if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
+               return -EIO;
+
+       return cpu_online(cpu) ? 0 : -EIO;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       unsigned int cpu = smp_processor_id();
+
+       remove_cpu_topology(cpu);
+
+       /*
+        * Take this CPU offline.  Once we clear this, we can't return,
+        * and we must not schedule until we're ready to give up the cpu.
+        */
+       set_cpu_online(cpu, false);
+
+       /* Find a new timesync master */
+       if (cpu == time_keeper_id) {
+               time_keeper_id = cpumask_first(cpu_online_mask);
+               pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
+       }
+
+       disable_percpu_irq(IPI_IRQ);
+
+       irq_migrate_all_off_this_cpu();
+
+       flush_cache_all_local();
+       flush_tlb_all_local(NULL);
+
+       /* disable all irqs, including timer irq */
+       local_irq_disable();
+
+       /* wait for next timer irq ... */
+       mdelay(1000/HZ+100);
+
+       /* ... and then clear all pending external irqs */
+       set_eiem(0);
+       mtctl(~0UL, CR_EIRR);
+       mfctl(CR_EIRR);
+       mtctl(0, CR_EIRR);
+#endif
+       return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+       pdc_cpu_rendezvous_lock();
+
+       if (!cpu_wait_death(cpu, 5)) {
+               pr_crit("CPU%u: cpu didn't die\n", cpu);
+               return;
+       }
+       pr_info("CPU%u: is shutting down\n", cpu);
+
+       /* set task's state to interruptible sleep */
+       set_current_state(TASK_INTERRUPTIBLE);
+       schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ);
 
-       return cpu_online(cpu) ? 0 : -ENOSYS;
+       pdc_cpu_rendezvous_unlock();
 }
 
 #ifdef CONFIG_PROC_FS
index d63f18d..8440c16 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 061119a..bb27dfe 100644 (file)
@@ -40,6 +40,8 @@
 
 #include <linux/timex.h>
 
+int time_keeper_id __read_mostly;      /* CPU used for timekeeping. */
+
 static unsigned long clocktick __ro_after_init;        /* timer cycles per tick */
 
 /*
@@ -84,7 +86,7 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
        cpuinfo->it_value = next_tick;
 
        /* Go do system house keeping. */
-       if (cpu != 0)
+       if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id))
                ticks_elapsed = 0;
        legacy_timer_tick(ticks_elapsed);
 
@@ -150,7 +152,7 @@ static struct clocksource clocksource_cr16 = {
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-void __init start_cpu_itimer(void)
+void start_cpu_itimer(void)
 {
        unsigned int cpu = smp_processor_id();
        unsigned long next_tick = mfctl(16) + clocktick;
index e88a6ce..9696e3c 100644 (file)
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/sched/topology.h>
+#include <linux/cpu.h>
 
 #include <asm/topology.h>
+#include <asm/sections.h>
 
- /*
-  * cpu topology table
-  */
-struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL_GPL(cpu_topology);
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
-       return &cpu_topology[cpu].core_sibling;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
-       struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
-       int cpu;
-
-       /* update core and thread sibling masks */
-       for_each_possible_cpu(cpu) {
-               cpu_topo = &cpu_topology[cpu];
-
-               if (cpuid_topo->socket_id != cpu_topo->socket_id)
-                       continue;
-
-               cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-               if (cpu != cpuid)
-                       cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
-               if (cpuid_topo->core_id != cpu_topo->core_id)
-                       continue;
-
-               cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
-               if (cpu != cpuid)
-                       cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
-       }
-       smp_wmb();
-}
-
-static int dualcores_found __initdata;
+static int dualcores_found;
 
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
  * which prevents simultaneous write access to cpu_topology array
  */
-void __init store_cpu_topology(unsigned int cpuid)
+void store_cpu_topology(unsigned int cpuid)
 {
-       struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid];
+       struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        struct cpuinfo_parisc *p;
        int max_socket = -1;
        unsigned long cpu;
@@ -71,6 +38,12 @@ void __init store_cpu_topology(unsigned int cpuid)
        if (cpuid_topo->core_id != -1)
                return;
 
+#ifdef CONFIG_HOTPLUG_CPU
+       per_cpu(cpu_devices, cpuid).hotpluggable = 1;
+#endif
+       if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
+               pr_warn("Failed to register CPU%d device", cpuid);
+
        /* create cpu topology mapping */
        cpuid_topo->thread_id = -1;
        cpuid_topo->core_id = 0;
@@ -86,25 +59,25 @@ void __init store_cpu_topology(unsigned int cpuid)
                        cpuid_topo->core_id = cpu_topology[cpu].core_id;
                        if (p->cpu_loc) {
                                cpuid_topo->core_id++;
-                               cpuid_topo->socket_id = cpu_topology[cpu].socket_id;
+                               cpuid_topo->package_id = cpu_topology[cpu].package_id;
                                dualcores_found = 1;
                                continue;
                        }
                }
 
-               if (cpuid_topo->socket_id == -1)
-                       max_socket = max(max_socket, cpu_topology[cpu].socket_id);
+               if (cpuid_topo->package_id == -1)
+                       max_socket = max(max_socket, cpu_topology[cpu].package_id);
        }
 
-       if (cpuid_topo->socket_id == -1)
-               cpuid_topo->socket_id = max_socket + 1;
+       if (cpuid_topo->package_id == -1)
+               cpuid_topo->package_id = max_socket + 1;
 
        update_siblings_masks(cpuid);
 
        pr_info("CPU%u: cpu core %d of socket %d\n",
                cpuid,
                cpu_topology[cpuid].core_id,
-               cpu_topology[cpuid].socket_id);
+               cpu_topology[cpuid].package_id);
 }
 
 static struct sched_domain_topology_level parisc_mc_topology[] = {
@@ -122,20 +95,6 @@ static struct sched_domain_topology_level parisc_mc_topology[] = {
  */
 void __init init_cpu_topology(void)
 {
-       unsigned int cpu;
-
-       /* init core mask and capacity */
-       for_each_possible_cpu(cpu) {
-               struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);
-
-               cpu_topo->thread_id = -1;
-               cpu_topo->core_id =  -1;
-               cpu_topo->socket_id = -1;
-               cpumask_clear(&cpu_topo->core_sibling);
-               cpumask_clear(&cpu_topo->thread_sibling);
-       }
-       smp_wmb();
-
        /* Set scheduler topology descriptor */
        if (dualcores_found)
                set_sched_topology(parisc_mc_topology);
index 219559d..47ed639 100644 (file)
@@ -48,6 +48,11 @@ struct dev_archdata {
 
 struct pdev_archdata {
        u64 dma_mask;
+       /*
+        * Pointer to nvdimm_pmu structure, to handle the unregistering
+        * of pmu device
+        */
+       void *priv;
 };
 
 #endif /* _ASM_POWERPC_DEVICE_H */
index 5476f62..9d7bd81 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 1238b94..f58728d 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/papr_pdsm.h>
 #include <asm/mce.h>
 #include <asm/unaligned.h>
+#include <linux/perf_event.h>
 
 #define BIND_ANY_ADDR (~0ul)
 
@@ -124,6 +125,8 @@ struct papr_scm_priv {
        /* The bits which needs to be overridden */
        u64 health_bitmap_inject_mask;
 
+        /* array to have event_code and stat_id mappings */
+       char **nvdimm_events_map;
 };
 
 static int papr_scm_pmem_flush(struct nd_region *nd_region,
@@ -344,6 +347,225 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
        return 0;
 }
 
+#ifdef CONFIG_PERF_EVENTS
+#define to_nvdimm_pmu(_pmu)    container_of(_pmu, struct nvdimm_pmu, pmu)
+
+static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
+{
+       struct papr_scm_perf_stat *stat;
+       struct papr_scm_perf_stats *stats;
+       struct papr_scm_priv *p = (struct papr_scm_priv *)dev->driver_data;
+       int rc, size;
+
+       /* Allocate request buffer enough to hold single performance stat */
+       size = sizeof(struct papr_scm_perf_stats) +
+               sizeof(struct papr_scm_perf_stat);
+
+       if (!p || !p->nvdimm_events_map)
+               return -EINVAL;
+
+       stats = kzalloc(size, GFP_KERNEL);
+       if (!stats)
+               return -ENOMEM;
+
+       stat = &stats->scm_statistic[0];
+       memcpy(&stat->stat_id,
+              p->nvdimm_events_map[event->attr.config],
+               sizeof(stat->stat_id));
+       stat->stat_val = 0;
+
+       rc = drc_pmem_query_stats(p, stats, 1);
+       if (rc < 0) {
+               kfree(stats);
+               return rc;
+       }
+
+       *count = be64_to_cpu(stat->stat_val);
+       kfree(stats);
+       return 0;
+}
+
+static int papr_scm_pmu_event_init(struct perf_event *event)
+{
+       struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
+       struct papr_scm_priv *p;
+
+       if (!nd_pmu)
+               return -EINVAL;
+
+       /* test the event attr type for PMU enumeration */
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
+       /* it does not support event sampling mode */
+       if (is_sampling_event(event))
+               return -EOPNOTSUPP;
+
+       /* no branch sampling */
+       if (has_branch_stack(event))
+               return -EOPNOTSUPP;
+
+       p = (struct papr_scm_priv *)nd_pmu->dev->driver_data;
+       if (!p)
+               return -EINVAL;
+
+       /* Invalid eventcode */
+       if (event->attr.config == 0 || event->attr.config > 16)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int papr_scm_pmu_add(struct perf_event *event, int flags)
+{
+       u64 count;
+       int rc;
+       struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
+
+       if (!nd_pmu)
+               return -EINVAL;
+
+       if (flags & PERF_EF_START) {
+               rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &count);
+               if (rc)
+                       return rc;
+
+               local64_set(&event->hw.prev_count, count);
+       }
+
+       return 0;
+}
+
+static void papr_scm_pmu_read(struct perf_event *event)
+{
+       u64 prev, now;
+       int rc;
+       struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
+
+       if (!nd_pmu)
+               return;
+
+       rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &now);
+       if (rc)
+               return;
+
+       prev = local64_xchg(&event->hw.prev_count, now);
+       local64_add(now - prev, &event->count);
+}
+
+static void papr_scm_pmu_del(struct perf_event *event, int flags)
+{
+       papr_scm_pmu_read(event);
+}
+
+static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
+{
+       struct papr_scm_perf_stat *stat;
+       struct papr_scm_perf_stats *stats;
+       char *statid;
+       int index, rc, count;
+       u32 available_events;
+
+       if (!p->stat_buffer_len)
+               return -ENOENT;
+
+       available_events = (p->stat_buffer_len  - sizeof(struct papr_scm_perf_stats))
+                       / sizeof(struct papr_scm_perf_stat);
+
+       /* Allocate the buffer for phyp where stats are written */
+       stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
+       if (!stats) {
+               rc = -ENOMEM;
+               return rc;
+       }
+
+       /* Allocate memory to nvdimm_event_map */
+       p->nvdimm_events_map = kcalloc(available_events, sizeof(char *), GFP_KERNEL);
+       if (!p->nvdimm_events_map) {
+               rc = -ENOMEM;
+               goto out_stats;
+       }
+
+       /* Called to get list of events supported */
+       rc = drc_pmem_query_stats(p, stats, 0);
+       if (rc)
+               goto out_nvdimm_events_map;
+
+       for (index = 0, stat = stats->scm_statistic, count = 0;
+                    index < available_events; index++, ++stat) {
+               statid = kzalloc(strlen(stat->stat_id) + 1, GFP_KERNEL);
+               if (!statid) {
+                       rc = -ENOMEM;
+                       goto out_nvdimm_events_map;
+               }
+
+               strcpy(statid, stat->stat_id);
+               p->nvdimm_events_map[count] = statid;
+               count++;
+       }
+       p->nvdimm_events_map[count] = NULL;
+       kfree(stats);
+       return 0;
+
+out_nvdimm_events_map:
+       kfree(p->nvdimm_events_map);
+out_stats:
+       kfree(stats);
+       return rc;
+}
+
+static void papr_scm_pmu_register(struct papr_scm_priv *p)
+{
+       struct nvdimm_pmu *nd_pmu;
+       int rc, nodeid;
+
+       nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
+       if (!nd_pmu) {
+               rc = -ENOMEM;
+               goto pmu_err_print;
+       }
+
+       rc = papr_scm_pmu_check_events(p, nd_pmu);
+       if (rc)
+               goto pmu_check_events_err;
+
+       nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
+       nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
+       nd_pmu->pmu.event_init = papr_scm_pmu_event_init;
+       nd_pmu->pmu.read = papr_scm_pmu_read;
+       nd_pmu->pmu.add = papr_scm_pmu_add;
+       nd_pmu->pmu.del = papr_scm_pmu_del;
+
+       nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
+                               PERF_PMU_CAP_NO_EXCLUDE;
+
+       /*updating the cpumask variable */
+       nodeid = numa_map_to_online_node(dev_to_node(&p->pdev->dev));
+       nd_pmu->arch_cpumask = *cpumask_of_node(nodeid);
+
+       rc = register_nvdimm_pmu(nd_pmu, p->pdev);
+       if (rc)
+               goto pmu_register_err;
+
+       /*
+        * Set archdata.priv value to nvdimm_pmu structure, to handle the
+        * unregistering of pmu device.
+        */
+       p->pdev->archdata.priv = nd_pmu;
+       return;
+
+pmu_register_err:
+       kfree(p->nvdimm_events_map);
+pmu_check_events_err:
+       kfree(nd_pmu);
+pmu_err_print:
+       dev_info(&p->pdev->dev, "nvdimm pmu didn't register rc=%d\n", rc);
+}
+
+#else
+static void papr_scm_pmu_register(struct papr_scm_priv *p) { }
+#endif
+
 /*
  * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
  * health information.
@@ -1320,6 +1542,7 @@ static int papr_scm_probe(struct platform_device *pdev)
                goto err2;
 
        platform_set_drvdata(pdev, p);
+       papr_scm_pmu_register(p);
 
        return 0;
 
@@ -1338,6 +1561,12 @@ static int papr_scm_remove(struct platform_device *pdev)
 
        nvdimm_bus_unregister(p->bus);
        drc_pmem_unbind(p);
+
+       if (pdev->archdata.priv)
+               unregister_nvdimm_pmu(pdev->archdata.priv);
+
+       pdev->archdata.priv = NULL;
+       kfree(p->nvdimm_events_map);
        kfree(p->bus_desc.provider_name);
        kfree(p);
 
index ea8ec8a..00fd9c5 100644 (file)
@@ -16,6 +16,7 @@ config RISCV
        select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
        select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
        select ARCH_HAS_BINFMT_FLAT
+       select ARCH_HAS_CURRENT_STACK_POINTER
        select ARCH_HAS_DEBUG_VM_PGTABLE
        select ARCH_HAS_DEBUG_VIRTUAL if MMU
        select ARCH_HAS_DEBUG_WX
@@ -47,6 +48,7 @@ config RISCV
        select CLONE_BACKWARDS
        select CLINT_TIMER if !MMU
        select COMMON_CLK
+       select CPU_PM if CPU_IDLE
        select EDAC_SUPPORT
        select GENERIC_ARCH_TOPOLOGY if SMP
        select GENERIC_ATOMIC64 if !64BIT
@@ -533,4 +535,10 @@ source "kernel/power/Kconfig"
 
 endmenu
 
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 source "arch/riscv/kvm/Kconfig"
index c112ab2..34592d0 100644 (file)
@@ -36,6 +36,9 @@ config SOC_VIRT
        select GOLDFISH
        select RTC_DRV_GOLDFISH if RTC_CLASS
        select SIFIVE_PLIC
+       select PM_GENERIC_DOMAINS if PM
+       select PM_GENERIC_DOMAINS_OF if PM && OF
+       select RISCV_SBI_CPUIDLE if CPU_IDLE
        help
          This enables support for QEMU Virt Machine.
 
index 984872f..b9e30df 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 7ba99b4..8d23401 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index be9b12c..24fd83b 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 031c0c2..25341f3 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 3eef52b..aad45d7 100644 (file)
                        reg = <0x0 0x10010000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <4>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        status = "disabled";
                };
                dma: dma@3000000 {
                        reg = <0x0 0x10011000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <5>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        status = "disabled";
                };
                i2c0: i2c@10030000 {
                        reg = <0x0 0x10030000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <50>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        reg-shift = <2>;
                        reg-io-width = <1>;
                        #address-cells = <1>;
                              <0x0 0x20000000 0x0 0x10000000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <51>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                              <0x0 0x30000000 0x0 0x10000000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <52>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        reg = <0x0 0x10050000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <6>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                              <0x0 0x100a0000 0x0 0x1000>;
                        local-mac-address = [00 00 00 00 00 00];
                        clock-names = "pclk", "hclk";
-                       clocks = <&prci PRCI_CLK_GEMGXLPLL>,
-                                <&prci PRCI_CLK_GEMGXLPLL>;
+                       clocks = <&prci FU540_PRCI_CLK_GEMGXLPLL>,
+                                <&prci FU540_PRCI_CLK_GEMGXLPLL>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        reg = <0x0 0x10020000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <42>, <43>, <44>, <45>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        #pwm-cells = <3>;
                        status = "disabled";
                };
                        reg = <0x0 0x10021000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <46>, <47>, <48>, <49>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        #pwm-cells = <3>;
                        status = "disabled";
                };
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
-                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       clocks = <&prci FU540_PRCI_CLK_TLCLK>;
                        status = "disabled";
                };
        };
index 8464b0e..7b77c13 100644 (file)
                        reg = <0x0 0x10010000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <39>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        status = "disabled";
                };
                uart1: serial@10011000 {
                        reg = <0x0 0x10011000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <40>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        status = "disabled";
                };
                i2c0: i2c@10030000 {
                        reg = <0x0 0x10030000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <52>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        reg-shift = <2>;
                        reg-io-width = <1>;
                        #address-cells = <1>;
                        reg = <0x0 0x10031000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <53>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        reg-shift = <2>;
                        reg-io-width = <1>;
                        #address-cells = <1>;
                              <0x0 0x20000000 0x0 0x10000000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <41>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                              <0x0 0x30000000 0x0 0x10000000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <42>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        reg = <0x0 0x10050000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <43>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                              <0x0 0x100a0000 0x0 0x1000>;
                        local-mac-address = [00 00 00 00 00 00];
                        clock-names = "pclk", "hclk";
-                       clocks = <&prci PRCI_CLK_GEMGXLPLL>,
-                                <&prci PRCI_CLK_GEMGXLPLL>;
+                       clocks = <&prci FU740_PRCI_CLK_GEMGXLPLL>,
+                                <&prci FU740_PRCI_CLK_GEMGXLPLL>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        reg = <0x0 0x10020000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <44>, <45>, <46>, <47>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        #pwm-cells = <3>;
                        status = "disabled";
                };
                        reg = <0x0 0x10021000 0x0 0x1000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <48>, <49>, <50>, <51>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        #pwm-cells = <3>;
                        status = "disabled";
                };
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
-                       clocks = <&prci PRCI_CLK_PCLK>;
+                       clocks = <&prci FU740_PRCI_CLK_PCLK>;
                        status = "disabled";
                };
                pcie@e00000000 {
                                        <0x0 0x0 0x0 0x3 &plic0 59>,
                                        <0x0 0x0 0x0 0x4 &plic0 60>;
                        clock-names = "pcie_aux";
-                       clocks = <&prci PRCI_CLK_PCIE_AUX>;
+                       clocks = <&prci FU740_PRCI_CLK_PCIE_AUX>;
                        pwren-gpios = <&gpio 5 0>;
                        reset-gpios = <&gpio 8 0>;
                        resets = <&prci 4>;
index 7cd10de..30e3017 100644 (file)
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
 CONFIG_SOC_MICROCHIP_POLARFIRE=y
 CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
@@ -64,8 +67,6 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
index 3f42ed8..2438fa3 100644 (file)
@@ -21,7 +21,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index af64b95..9a133e6 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index e1c9864..5269fbb 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
index e0e5c7c..7e5efdc 100644 (file)
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
 CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_ARCH_RV32I=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
@@ -62,8 +65,6 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
index 618d7c5..8c2549b 100644 (file)
 #error "Unexpected __SIZEOF_SHORT__"
 #endif
 
+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+       REG_L t0, _xip_fixup
+       add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+       la t0, __data_loc
+       REG_L t1, _xip_phys_offset
+       sub \reg, \reg, t1
+       add \reg, \reg, t0
+.endm
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h
new file mode 100644 (file)
index 0000000..71fdc60
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+       /*
+        * Add mb() here to ensure that all
+        * IO/MEM accesses are completed prior
+        * to entering WFI.
+        */
+       mb();
+       wait_for_interrupt();
+}
+
+#endif
index 1de233d..21774d8 100644 (file)
@@ -33,6 +33,8 @@ static __always_inline struct task_struct *get_current(void)
 
 #define current get_current()
 
+register unsigned long current_stack_pointer __asm__("sp");
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_CURRENT_H */
index 4254ff2..1075bea 100644 (file)
@@ -2,8 +2,8 @@
 /* Copyright (C) 2017 Andes Technology Corporation */
 #ifdef CONFIG_MODULE_SECTIONS
 SECTIONS {
-       .plt (NOLOAD) : { BYTE(0) }
-       .got (NOLOAD) : { BYTE(0) }
-       .got.plt (NOLOAD) : { BYTE(0) }
+       .plt : { BYTE(0) }
+       .got : { BYTE(0) }
+       .got.plt : { BYTE(0) }
 }
 #endif
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
new file mode 100644 (file)
index 0000000..8be391c
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+       /* Saved and restored by low-level functions */
+       struct pt_regs regs;
+       /* Saved and restored by high-level functions */
+       unsigned long scratch;
+       unsigned long tvec;
+       unsigned long ie;
+#ifdef CONFIG_MMU
+       unsigned long satp;
+#endif
+};
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+               int (*finish)(unsigned long arg,
+                             unsigned long entry,
+                             unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+#endif
index 60da0dc..74d888c 100644 (file)
 #include <asm/page.h>
 #include <linux/const.h>
 
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
 /* thread information allocation */
 #ifdef CONFIG_64BIT
-#define THREAD_SIZE_ORDER      (2)
+#define THREAD_SIZE_ORDER      (2 + KASAN_STACK_ORDER)
 #else
-#define THREAD_SIZE_ORDER      (1)
+#define THREAD_SIZE_ORDER      (1 + KASAN_STACK_ORDER)
 #endif
 #define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
 
index e0133d1..87adbe4 100644 (file)
@@ -48,6 +48,8 @@ obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_MODULE_SECTIONS)  += module-sections.o
 
+obj-$(CONFIG_CPU_PM)           += suspend_entry.o suspend.o
+
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += mcount-dyn.o
 
index df0519a..df94443 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
 #include <asm/cpu_ops_sbi.h>
+#include <asm/suspend.h>
 
 void asm_offsets(void);
 
@@ -113,6 +114,8 @@ void asm_offsets(void)
        OFFSET(PT_BADADDR, pt_regs, badaddr);
        OFFSET(PT_CAUSE, pt_regs, cause);
 
+       OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
+
        OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
        OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
        OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
index d2a9361..ccb6177 100644 (file)
@@ -69,11 +69,11 @@ int riscv_of_parent_hartid(struct device_node *node)
                .uprop = #UPROP,                                \
                .isa_ext_id = EXTID,                            \
        }
-/**
+/*
  * Here are the ordering rules of extension naming defined by RISC-V
  * specification :
  * 1. All extensions should be separated from other multi-letter extensions
- *    from other multi-letter extensions by an underscore.
+ *    by an underscore.
  * 2. The first letter following the 'Z' conventionally indicates the most
  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
  *    If multiple 'Z' extensions are named, they should be ordered first
@@ -110,7 +110,7 @@ static void print_isa_ext(struct seq_file *f)
        }
 }
 
-/**
+/*
  * These are the only valid base (single letter) ISA extensions as per the spec.
  * It also specifies the canonical order in which it appears in the spec.
  * Some of the extension may just be a place holder for now (B, K, P, J).
index 2e16f67..4f5a6f8 100644 (file)
@@ -21,7 +21,7 @@ const struct cpu_operations cpu_ops_sbi;
  * be invoked from multiple threads in parallel. Define a per cpu data
  * to handle that.
  */
-DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
 
 static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
                              unsigned long priv)
index ec07f99..893b8bb 100644 (file)
 #include <asm/image.h>
 #include "efi-header.S"
 
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
-       REG_L t0, _xip_fixup
-       add \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-       la t0, __data_loc
-       REG_L t1, _xip_phys_offset
-       sub \reg, \reg, t1
-       add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
 __HEAD
 ENTRY(_start)
        /*
@@ -89,7 +69,8 @@ pe_head_start:
 
 .align 2
 #ifdef CONFIG_MMU
-relocate:
+       .global relocate_enable_mmu
+relocate_enable_mmu:
        /* Relocate return address */
        la a1, kernel_map
        XIP_FIXUP_OFFSET a1
@@ -184,7 +165,7 @@ secondary_start_sbi:
        /* Enable virtual memory and relocate to virtual address */
        la a0, swapper_pg_dir
        XIP_FIXUP_OFFSET a0
-       call relocate
+       call relocate_enable_mmu
 #endif
        call setup_trap_vector
        tail smp_callin
@@ -328,7 +309,7 @@ clear_bss_done:
 #ifdef CONFIG_MMU
        la a0, early_pg_dir
        XIP_FIXUP_OFFSET a0
-       call relocate
+       call relocate_enable_mmu
 #endif /* CONFIG_MMU */
 
        call setup_trap_vector
index 4a48287..c29cef9 100644 (file)
@@ -69,7 +69,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
        return 0;
 }
 
-static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
        ptrdiff_t offset = (void *)v - (void *)location;
@@ -301,7 +301,7 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
        [R_RISCV_64]                    = apply_r_riscv_64_rela,
        [R_RISCV_BRANCH]                = apply_r_riscv_branch_rela,
        [R_RISCV_JAL]                   = apply_r_riscv_jal_rela,
-       [R_RISCV_RVC_BRANCH]            = apply_r_riscv_rcv_branch_rela,
+       [R_RISCV_RVC_BRANCH]            = apply_r_riscv_rvc_branch_rela,
        [R_RISCV_RVC_JUMP]              = apply_r_riscv_rvc_jump_rela,
        [R_RISCV_PCREL_HI20]            = apply_r_riscv_pcrel_hi20_rela,
        [R_RISCV_PCREL_LO12_I]          = apply_r_riscv_pcrel_lo12_i_rela,
index 55faa49..3348a61 100644 (file)
@@ -68,7 +68,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 
 static bool fill_callchain(void *entry, unsigned long pc)
 {
-       return perf_callchain_store(entry, pc);
+       return perf_callchain_store(entry, pc) == 0;
 }
 
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
index 03ac3aa..504b496 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/string.h>
 #include <asm/switch_to.h>
 #include <asm/thread_info.h>
+#include <asm/cpuidle.h>
 
 register unsigned long gp_in_global __asm__("gp");
 
@@ -37,7 +38,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
 
 void arch_cpu_idle(void)
 {
-       wait_for_interrupt();
+       cpu_do_idle();
        raw_local_irq_enable();
 }
 
index 14d2b53..08d11a5 100644 (file)
@@ -14,8 +14,6 @@
 
 #include <asm/stacktrace.h>
 
-register unsigned long sp_in_global __asm__("sp");
-
 #ifdef CONFIG_FRAME_POINTER
 
 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
@@ -30,7 +28,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                fp = (unsigned long)__builtin_frame_address(0);
-               sp = sp_in_global;
+               sp = current_stack_pointer;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
@@ -78,7 +76,7 @@ void notrace walk_stackframe(struct task_struct *task,
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
-               sp = sp_in_global;
+               sp = current_stack_pointer;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
new file mode 100644 (file)
index 0000000..9ba24fb
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/ftrace.h>
+#include <asm/csr.h>
+#include <asm/suspend.h>
+
+static void suspend_save_csrs(struct suspend_context *context)
+{
+       context->scratch = csr_read(CSR_SCRATCH);
+       context->tvec = csr_read(CSR_TVEC);
+       context->ie = csr_read(CSR_IE);
+
+       /*
+        * No need to save/restore IP CSR (i.e. MIP or SIP) because:
+        *
+        * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
+        *    external devices (such as interrupt controller, timer, etc).
+        * 2. For MMU (S-mode) kernel, the bits in SIP are set by
+        *    M-mode firmware and external devices (such as interrupt
+        *    controller, etc).
+        */
+
+#ifdef CONFIG_MMU
+       context->satp = csr_read(CSR_SATP);
+#endif
+}
+
+static void suspend_restore_csrs(struct suspend_context *context)
+{
+       csr_write(CSR_SCRATCH, context->scratch);
+       csr_write(CSR_TVEC, context->tvec);
+       csr_write(CSR_IE, context->ie);
+
+#ifdef CONFIG_MMU
+       csr_write(CSR_SATP, context->satp);
+#endif
+}
+
+int cpu_suspend(unsigned long arg,
+               int (*finish)(unsigned long arg,
+                             unsigned long entry,
+                             unsigned long context))
+{
+       int rc = 0;
+       struct suspend_context context = { 0 };
+
+       /* Finisher should be non-NULL */
+       if (!finish)
+               return -EINVAL;
+
+       /* Save additional CSRs */
+       suspend_save_csrs(&context);
+
+       /*
+        * Function graph tracer state gets inconsistent when the kernel
+        * calls functions that never return (aka finishers) hence disable
+        * graph tracing during their execution.
+        */
+       pause_graph_tracing();
+
+       /* Save context on stack */
+       if (__cpu_suspend_enter(&context)) {
+               /* Call the finisher */
+               rc = finish(arg, __pa_symbol(__cpu_resume_enter),
+                           (ulong)&context);
+
+               /*
+                * Should never reach here, unless the suspend finisher
+                * fails. Successful cpu_suspend() should return from
+                * __cpu_resume_enter()
+                */
+               if (!rc)
+                       rc = -EOPNOTSUPP;
+       }
+
+       /* Enable function graph tracer */
+       unpause_graph_tracing();
+
+       /* Restore additional CSRs */
+       suspend_restore_csrs(&context);
+
+       return rc;
+}
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
new file mode 100644 (file)
index 0000000..4b07b80
--- /dev/null
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+       .text
+       .altmacro
+       .option norelax
+
+ENTRY(__cpu_suspend_enter)
+       /* Save registers (except A0 and T0-T6) */
+       REG_S   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+       REG_S   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+       REG_S   gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+       REG_S   tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+       REG_S   s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+       REG_S   s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+       REG_S   a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+       REG_S   a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+       REG_S   a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+       REG_S   a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+       REG_S   a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+       REG_S   a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+       REG_S   a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+       REG_S   s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+       REG_S   s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+       REG_S   s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+       REG_S   s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+       REG_S   s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+       REG_S   s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+       REG_S   s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+       REG_S   s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+       REG_S   s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+       REG_S   s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+       /* Save CSRs */
+       csrr    t0, CSR_EPC
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+       csrr    t0, CSR_STATUS
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+       csrr    t0, CSR_TVAL
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+       csrr    t0, CSR_CAUSE
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+
+       /* Return non-zero value */
+       li      a0, 1
+
+       /* Return to C code */
+       ret
+END(__cpu_suspend_enter)
+
+ENTRY(__cpu_resume_enter)
+       /* Load the global pointer */
+       .option push
+       .option norelax
+               la gp, __global_pointer$
+       .option pop
+
+#ifdef CONFIG_MMU
+       /* Save A0 and A1 */
+       add     t0, a0, zero
+       add     t1, a1, zero
+
+       /* Enable MMU */
+       la      a0, swapper_pg_dir
+       XIP_FIXUP_OFFSET a0
+       call    relocate_enable_mmu
+
+       /* Restore A0 and A1 */
+       add     a0, t0, zero
+       add     a1, t1, zero
+#endif
+
+       /* Make A0 point to suspend context */
+       add     a0, a1, zero
+
+       /* Restore CSRs */
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+       csrw    CSR_EPC, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+       csrw    CSR_STATUS, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+       csrw    CSR_TVAL, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+       csrw    CSR_CAUSE, t0
+
+       /* Restore registers (except A0 and T0-T6) */
+       REG_L   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+       REG_L   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+       REG_L   gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+       REG_L   tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+       REG_L   s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+       REG_L   s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+       REG_L   a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+       REG_L   a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+       REG_L   a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+       REG_L   a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+       REG_L   a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+       REG_L   a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+       REG_L   a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+       REG_L   s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+       REG_L   s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+       REG_L   s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+       REG_L   s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+       REG_L   s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+       REG_L   s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+       REG_L   s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+       REG_L   s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+       REG_L   s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+       REG_L   s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+       /* Return zero value */
+       add     a0, zero, zero
+
+       /* Return to C code */
+       ret
+END(__cpu_resume_enter)
index 9b80e8b..77b5a03 100644 (file)
@@ -58,6 +58,7 @@ config S390
        select ALTERNATE_USER_ADDRESS_SPACE
        select ARCH_32BIT_USTAT_F_TINODE
        select ARCH_BINFMT_ELF_STATE
+       select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
        select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
        select ARCH_ENABLE_MEMORY_HOTREMOVE
        select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
index 955d620..bb3837d 100644 (file)
  * a 2-byte nop if the size of the area is not divisible by 6.
  */
 .macro alt_pad_fill bytes
-       .fill   ( \bytes ) / 6, 6, 0xc0040000
-       .fill   ( \bytes ) % 6 / 4, 4, 0x47000000
-       .fill   ( \bytes ) % 6 % 4 / 2, 2, 0x0700
+       .rept   ( \bytes ) / 6
+       brcl    0,0
+       .endr
+       .rept   ( \bytes ) % 6 / 4
+       nop
+       .endr
+       .rept   ( \bytes ) % 6 % 4 / 2
+       nopr
+       .endr
 .endm
 
 /*
index d3880ca..3f2856e 100644 (file)
@@ -71,11 +71,18 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
        ".if " oldinstr_pad_len(num) " > 6\n"                           \
        "\tjg " e_oldinstr_pad_end "f\n"                                \
        "6620:\n"                                                       \
-       "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+       "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n"      \
+       "\tnopr\n"                                                      \
        ".else\n"                                                       \
-       "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"        \
-       "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"   \
-       "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"  \
+       "\t.rept " oldinstr_pad_len(num) " / 6\n"                       \
+       "\t.brcl 0,0\n"                                                 \
+       "\t.endr\n"                                                     \
+       "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n"                  \
+       "\tnop\n"                                                       \
+       "\t.endr\n"                                                     \
+       "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n"             \
+       "\tnopr\n"                                                      \
+       ".endr\n"                                                       \
        ".endif\n"
 
 #define OLDINSTR(oldinstr, num)                                                \
index ae75da5..b515cfa 100644 (file)
@@ -60,11 +60,11 @@ static inline bool ap_instructions_available(void)
        unsigned long reg1 = 0;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid into gr0 */
-               "       lghi    1,0\n"         /* 0 into gr1 */
-               "       lghi    2,0\n"         /* 0 into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(TAPQ) */
-               "0:     la      %[reg1],1\n"   /* 1 into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid into gr0 */
+               "       lghi    1,0\n"                  /* 0 into gr1 */
+               "       lghi    2,0\n"                  /* 0 into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(TAPQ) */
+               "0:     la      %[reg1],1\n"            /* 1 into reg1 */
                "1:\n"
                EX_TABLE(0b, 1b)
                : [reg1] "+&d" (reg1)
@@ -86,11 +86,11 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
        unsigned long reg2;
 
        asm volatile(
-               "       lgr     0,%[qid]\n"    /* qid into gr0 */
-               "       lghi    2,0\n"         /* 0 into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(TAPQ) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
-               "       lgr     %[reg2],2\n"   /* gr2 into reg2 */
+               "       lgr     0,%[qid]\n"             /* qid into gr0 */
+               "       lghi    2,0\n"                  /* 0 into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(TAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
+               "       lgr     %[reg2],2\n"            /* gr2 into reg2 */
                : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2)
                : [qid] "d" (qid)
                : "cc", "0", "1", "2");
@@ -128,9 +128,9 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
        struct ap_queue_status reg1;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"  /* qid arg into gr0 */
-               "       .long   0xb2af0000\n" /* PQAP(RAPQ) */
-               "       lgr     %[reg1],1\n"  /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid arg into gr0 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(RAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "=&d" (reg1)
                : [reg0] "d" (reg0)
                : "cc", "0", "1");
@@ -149,9 +149,9 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
        struct ap_queue_status reg1;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid arg into gr0 */
-               "       .long   0xb2af0000\n"  /* PQAP(ZAPQ) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid arg into gr0 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(ZAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "=&d" (reg1)
                : [reg0] "d" (reg0)
                : "cc", "0", "1");
@@ -190,10 +190,10 @@ static inline int ap_qci(struct ap_config_info *config)
        struct ap_config_info *reg2 = config;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* QCI fc into gr0 */
-               "       lgr     2,%[reg2]\n"   /* ptr to config into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(QCI) */
-               "0:     la      %[reg1],0\n"   /* good case, QCI fc available */
+               "       lgr     0,%[reg0]\n"            /* QCI fc into gr0 */
+               "       lgr     2,%[reg2]\n"            /* ptr to config into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(QCI) */
+               "0:     la      %[reg1],0\n"            /* good case, QCI fc available */
                "1:\n"
                EX_TABLE(0b, 1b)
                : [reg1] "+&d" (reg1)
@@ -246,11 +246,11 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
        reg1.qirqctrl = qirqctrl;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid param into gr0 */
-               "       lgr     1,%[reg1]\n"   /* irq ctrl into gr1 */
-               "       lgr     2,%[reg2]\n"   /* ni addr into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(AQIC) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid param into gr0 */
+               "       lgr     1,%[reg1]\n"            /* irq ctrl into gr1 */
+               "       lgr     2,%[reg2]\n"            /* ni addr into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(AQIC) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "+&d" (reg1)
                : [reg0] "d" (reg0), [reg2] "d" (reg2)
                : "cc", "0", "1", "2");
@@ -297,11 +297,11 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
        reg1.value = apinfo->val;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid param into gr0 */
-               "       lgr     1,%[reg1]\n"   /* qact in info into gr1 */
-               "       .long   0xb2af0000\n"  /* PQAP(QACT) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
-               "       lgr     %[reg2],2\n"   /* qact out info into reg2 */
+               "       lgr     0,%[reg0]\n"            /* qid param into gr0 */
+               "       lgr     1,%[reg1]\n"            /* qact in info into gr1 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(QACT) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
+               "       lgr     %[reg2],2\n"            /* qact out info into reg2 */
                : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2)
                : [reg0] "d" (reg0)
                : "cc", "0", "1", "2");
index c800199..82388da 100644 (file)
@@ -74,8 +74,17 @@ static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
        __ctl_load(reg, cr, cr);
 }
 
-void smp_ctl_set_bit(int cr, int bit);
-void smp_ctl_clear_bit(int cr, int bit);
+void smp_ctl_set_clear_bit(int cr, int bit, bool set);
+
+static inline void ctl_set_bit(int cr, int bit)
+{
+       smp_ctl_set_clear_bit(cr, bit, true);
+}
+
+static inline void ctl_clear_bit(int cr, int bit)
+{
+       smp_ctl_set_clear_bit(cr, bit, false);
+}
 
 union ctlreg0 {
        unsigned long val;
@@ -130,8 +139,5 @@ union ctlreg15 {
        };
 };
 
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_CTL_REG_H */
index 84ec631..eee8d96 100644 (file)
@@ -319,11 +319,18 @@ extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
 extern int memcpy_real(void *, unsigned long, size_t);
 extern void memcpy_absolute(void *, void *, size_t);
 
-#define mem_assign_absolute(dest, val) do {                    \
-       __typeof__(dest) __tmp = (val);                         \
-                                                               \
-       BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));             \
-       memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));        \
+#define put_abs_lowcore(member, x) do {                                        \
+       unsigned long __abs_address = offsetof(struct lowcore, member); \
+       __typeof__(((struct lowcore *)0)->member) __tmp = (x);          \
+                                                                       \
+       memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp));    \
+} while (0)
+
+#define get_abs_lowcore(x, member) do {                                        \
+       unsigned long __abs_address = offsetof(struct lowcore, member); \
+       __typeof__(((struct lowcore *)0)->member) *__ptr = &(x);        \
+                                                                       \
+       memcpy_absolute(__ptr, __va(__abs_address), sizeof(*__ptr));    \
 } while (0)
 
 extern int s390_isolate_bp(void);
index 888a2f1..24a5444 100644 (file)
@@ -78,7 +78,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
        typecheck(int, lp->lock);
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
                "       sth     %1,%0\n"
                : "=R" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
index ad2c996..fde7e6b 100644 (file)
        __diag_pop();                                                                   \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
-#endif /* _ASM_X86_SYSCALL_WRAPPER_H */
+#endif /* _ASM_S390_SYSCALL_WRAPPER_H */
index 5ebf534..0bf06f1 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/sched.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
+#include <linux/llist.h>
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
@@ -36,10 +38,21 @@ struct unwind_state {
        struct pt_regs *regs;
        unsigned long sp, ip;
        int graph_idx;
+       struct llist_node *kr_cur;
        bool reliable;
        bool error;
 };
 
+/* Recover the return address modified by kretprobe and ftrace_graph. */
+static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
+                                                   unsigned long ip)
+{
+       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
+       if (is_kretprobe_trampoline(ip))
+               ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur);
+       return ip;
+}
+
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs, unsigned long first_frame);
 bool unwind_next_frame(struct unwind_state *state);
index a601a51..59b69c8 100644 (file)
@@ -121,22 +121,22 @@ _LPP_OFFSET       = __LC_LPP
        .endm
 
        .macro BPOFF
-       ALTERNATIVE "", ".long 0xb2e8c000", 82
+       ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82
        .endm
 
        .macro BPON
-       ALTERNATIVE "", ".long 0xb2e8d000", 82
+       ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82
        .endm
 
        .macro BPENTER tif_ptr,tif_mask
-       ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
+       ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
                    "", 82
        .endm
 
        .macro BPEXIT tif_ptr,tif_mask
        TSTMSK  \tif_ptr,\tif_mask
-       ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
-                   "jnz .+8; .long 0xb2e8d000", 82
+       ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
+                   "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
        .endm
 
        /*
index 28ae7df..1cc85b8 100644 (file)
@@ -1646,8 +1646,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
 
        csum = (__force unsigned int)
               csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
-       mem_assign_absolute(S390_lowcore.ipib, ipib);
-       mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
+       put_abs_lowcore(ipib, ipib);
+       put_abs_lowcore(ipib_checksum, csum);
        dump_run(trigger);
 }
 
index e32c14f..0032bdb 100644 (file)
@@ -284,11 +284,11 @@ NOKPROBE_SYMBOL(pop_kprobe);
 
 void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
-       ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
-       ri->fp = NULL;
+       ri->ret_addr = (kprobe_opcode_t *)regs->gprs[14];
+       ri->fp = (void *)regs->gprs[15];
 
        /* Replace the return addr with trampoline addr */
-       regs->gprs[14] = (unsigned long) &__kretprobe_trampoline;
+       regs->gprs[14] = (unsigned long)&__kretprobe_trampoline;
 }
 NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
@@ -385,7 +385,7 @@ NOKPROBE_SYMBOL(arch_kretprobe_fixup_return);
  */
 void trampoline_probe_handler(struct pt_regs *regs)
 {
-       kretprobe_trampoline_handler(regs, NULL);
+       kretprobe_trampoline_handler(regs, (void *)regs->gprs[15]);
 }
 NOKPROBE_SYMBOL(trampoline_probe_handler);
 
index 088d57a..b2ef014 100644 (file)
@@ -226,7 +226,7 @@ void arch_crash_save_vmcoreinfo(void)
        vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
        vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-       mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+       put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note());
 }
 
 void machine_shutdown(void)
index 6b5b64e..1acc2e0 100644 (file)
@@ -63,7 +63,7 @@ void __init os_info_init(void)
        os_info.version_minor = OS_INFO_VERSION_MINOR;
        os_info.magic = OS_INFO_MAGIC;
        os_info.csum = os_info_csum(&os_info);
-       mem_assign_absolute(S390_lowcore.os_info, __pa(ptr));
+       put_abs_lowcore(os_info, __pa(ptr));
 }
 
 #ifdef CONFIG_CRASH_DUMP
index 84e23fc..d860ac3 100644 (file)
@@ -481,11 +481,11 @@ static void __init setup_lowcore_dat_off(void)
        lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
 
        /* Setup absolute zero lowcore */
-       mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
-       mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
-       mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
-       mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
-       mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
+       put_abs_lowcore(restart_stack, lc->restart_stack);
+       put_abs_lowcore(restart_fn, lc->restart_fn);
+       put_abs_lowcore(restart_data, lc->restart_data);
+       put_abs_lowcore(restart_source, lc->restart_source);
+       put_abs_lowcore(restart_psw, lc->restart_psw);
 
        lc->spinlock_lockval = arch_spin_lockval(0);
        lc->spinlock_index = 0;
@@ -501,6 +501,7 @@ static void __init setup_lowcore_dat_off(void)
 static void __init setup_lowcore_dat_on(void)
 {
        struct lowcore *lc = lowcore_ptr[0];
+       int cr;
 
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
@@ -509,10 +510,10 @@ static void __init setup_lowcore_dat_on(void)
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
        __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
-       mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
-       mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
-       memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
-                       sizeof(S390_lowcore.cregs_save_area));
+       put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS);
+       put_abs_lowcore(program_new_psw, lc->program_new_psw);
+       for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++)
+               put_abs_lowcore(cregs_save_area[cr], lc->cregs_save_area[cr]);
 }
 
 static struct resource code_resource = {
index 127da18..30c91d5 100644 (file)
@@ -213,7 +213,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        if (nmi_alloc_mcesa(&lc->mcesad))
                goto out;
        lowcore_ptr[cpu] = lc;
-       pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
+       pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
        return 0;
 
 out:
@@ -326,10 +326,17 @@ static void pcpu_delegate(struct pcpu *pcpu,
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
-       mem_assign_absolute(lc->restart_stack, stack);
-       mem_assign_absolute(lc->restart_fn, (unsigned long) func);
-       mem_assign_absolute(lc->restart_data, (unsigned long) data);
-       mem_assign_absolute(lc->restart_source, source_cpu);
+       if (lc) {
+               lc->restart_stack = stack;
+               lc->restart_fn = (unsigned long)func;
+               lc->restart_data = (unsigned long)data;
+               lc->restart_source = source_cpu;
+       } else {
+               put_abs_lowcore(restart_stack, stack);
+               put_abs_lowcore(restart_fn, (unsigned long)func);
+               put_abs_lowcore(restart_data, (unsigned long)data);
+               put_abs_lowcore(restart_source, source_cpu);
+       }
        __bpon();
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
@@ -570,39 +577,27 @@ static void smp_ctl_bit_callback(void *info)
 }
 
 static DEFINE_SPINLOCK(ctl_lock);
-static unsigned long ctlreg;
 
-/*
- * Set a bit in a control register of all cpus
- */
-void smp_ctl_set_bit(int cr, int bit)
+void smp_ctl_set_clear_bit(int cr, int bit, bool set)
 {
-       struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
-
-       spin_lock(&ctl_lock);
-       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
-       __set_bit(bit, &ctlreg);
-       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
-       spin_unlock(&ctl_lock);
-       on_each_cpu(smp_ctl_bit_callback, &parms, 1);
-}
-EXPORT_SYMBOL(smp_ctl_set_bit);
-
-/*
- * Clear a bit in a control register of all cpus
- */
-void smp_ctl_clear_bit(int cr, int bit)
-{
-       struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
+       struct ec_creg_mask_parms parms = { .cr = cr, };
+       u64 ctlreg;
 
+       if (set) {
+               parms.orval = 1UL << bit;
+               parms.andval = -1UL;
+       } else {
+               parms.orval = 0;
+               parms.andval = ~(1UL << bit);
+       }
        spin_lock(&ctl_lock);
-       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
-       __clear_bit(bit, &ctlreg);
-       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       get_abs_lowcore(ctlreg, cregs_save_area[cr]);
+       ctlreg = (ctlreg & parms.andval) | parms.orval;
+       put_abs_lowcore(cregs_save_area[cr], ctlreg);
        spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
-EXPORT_SYMBOL(smp_ctl_clear_bit);
+EXPORT_SYMBOL(smp_ctl_set_clear_bit);
 
 #ifdef CONFIG_CRASH_DUMP
 
index b98f250..fb85e79 100644 (file)
@@ -21,8 +21,7 @@ uapi: $(uapi-hdrs-y)
 
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 filechk_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
 
index 674c650..1d2aa44 100644 (file)
@@ -141,10 +141,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
        do_trap(regs, SIGFPE, si_code, "floating point exception");
 }
 
-static void translation_exception(struct pt_regs *regs)
+static void translation_specification_exception(struct pt_regs *regs)
 {
        /* May never happen. */
-       panic("Translation exception");
+       panic("Translation-Specification Exception");
 }
 
 static void illegal_op(struct pt_regs *regs)
@@ -368,7 +368,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
        [0x0f]          = hfp_divide_exception,
        [0x10]          = do_dat_exception,
        [0x11]          = do_dat_exception,
-       [0x12]          = translation_exception,
+       [0x12]          = translation_specification_exception,
        [0x13]          = special_op_exception,
        [0x14]          = default_trap_handler,
        [0x15]          = operand_exception,
index 707fd99..0ece156 100644 (file)
@@ -64,8 +64,8 @@ bool unwind_next_frame(struct unwind_state *state)
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
                reliable = false;
                regs = NULL;
-               if (!__kernel_text_address(ip)) {
-                       /* skip bogus %r14 */
+               /* skip bogus %r14 or if is the same as regs->psw.addr */
+               if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
                        state->regs = NULL;
                        return unwind_next_frame(state);
                }
@@ -103,13 +103,11 @@ bool unwind_next_frame(struct unwind_state *state)
        if (sp & 0x7)
                goto out_err;
 
-       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);
-
        /* Update unwind state */
        state->sp = sp;
-       state->ip = ip;
        state->regs = regs;
        state->reliable = reliable;
+       state->ip = unwind_recover_ret_addr(state, ip);
        return true;
 
 out_err:
@@ -161,12 +159,10 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
        }
 
-       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
-
        /* Update unwind state */
        state->sp = sp;
-       state->ip = ip;
        state->reliable = true;
+       state->ip = unwind_recover_ret_addr(state, ip);
 
        if (!first_frame)
                return;
index 692dc84..5e7ea8b 100644 (file)
@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock)
        int owner;
 
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
                "       l       %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
        int expected = old;
 
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
                "       cs      %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
index c01f028..9bb0673 100644 (file)
@@ -47,7 +47,7 @@ static void print_backtrace(char *bt)
 static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                                unsigned long sp)
 {
-       int frame_count, prev_is_func2, seen_func2_func1;
+       int frame_count, prev_is_func2, seen_func2_func1, seen_kretprobe_trampoline;
        const int max_frames = 128;
        struct unwind_state state;
        size_t bt_pos = 0;
@@ -63,6 +63,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
        frame_count = 0;
        prev_is_func2 = 0;
        seen_func2_func1 = 0;
+       seen_kretprobe_trampoline = 0;
        unwind_for_each_frame(&state, task, regs, sp) {
                unsigned long addr = unwind_get_return_address(&state);
                char sym[KSYM_SYMBOL_LEN];
@@ -88,6 +89,8 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
                        seen_func2_func1 = 1;
                prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
+               if (str_has_prefix(sym, "__kretprobe_trampoline+0x0/"))
+                       seen_kretprobe_trampoline = 1;
        }
 
        /* Check the results. */
@@ -103,6 +106,10 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                kunit_err(current_test, "Maximum number of frames exceeded\n");
                ret = -EINVAL;
        }
+       if (seen_kretprobe_trampoline) {
+               kunit_err(current_test, "__kretprobe_trampoline+0x0 in unwinding results\n");
+               ret = -EINVAL;
+       }
        if (ret || force_bt)
                print_backtrace(bt);
        kfree(bt);
@@ -132,36 +139,50 @@ static struct unwindme *unwindme;
 #define UWM_PGM                        0x40    /* Unwind from program check handler */
 #define UWM_KPROBE_ON_FTRACE   0x80    /* Unwind from kprobe handler called via ftrace. */
 #define UWM_FTRACE             0x100   /* Unwind from ftrace handler. */
-#define UWM_KRETPROBE          0x200   /* Unwind kretprobe handlers. */
+#define UWM_KRETPROBE          0x200   /* Unwind through kretprobed function. */
+#define UWM_KRETPROBE_HANDLER  0x400   /* Unwind from kretprobe handler. */
 
-static __always_inline unsigned long get_psw_addr(void)
+static __always_inline struct pt_regs fake_pt_regs(void)
 {
-       unsigned long psw_addr;
+       struct pt_regs regs;
+
+       memset(&regs, 0, sizeof(regs));
+       regs.gprs[15] = current_stack_pointer();
 
        asm volatile(
                "basr   %[psw_addr],0\n"
-               : [psw_addr] "=d" (psw_addr));
-       return psw_addr;
+               : [psw_addr] "=d" (regs.psw.addr));
+       return regs;
 }
 
 static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct unwindme *u = unwindme;
 
+       if (!(u->flags & UWM_KRETPROBE_HANDLER))
+               return 0;
+
        u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
                             (u->flags & UWM_SP) ? u->sp : 0);
 
        return 0;
 }
 
-static noinline notrace void test_unwind_kretprobed_func(void)
+static noinline notrace int test_unwind_kretprobed_func(struct unwindme *u)
 {
-       asm volatile("  nop\n");
+       struct pt_regs regs;
+
+       if (!(u->flags & UWM_KRETPROBE))
+               return 0;
+
+       regs = fake_pt_regs();
+       return test_unwind(NULL, (u->flags & UWM_REGS) ? &regs : NULL,
+                          (u->flags & UWM_SP) ? u->sp : 0);
 }
 
-static noinline void test_unwind_kretprobed_func_caller(void)
+static noinline int test_unwind_kretprobed_func_caller(struct unwindme *u)
 {
-       test_unwind_kretprobed_func();
+       return test_unwind_kretprobed_func(u);
 }
 
 static int test_unwind_kretprobe(struct unwindme *u)
@@ -187,10 +208,12 @@ static int test_unwind_kretprobe(struct unwindme *u)
                return -EINVAL;
        }
 
-       test_unwind_kretprobed_func_caller();
+       ret = test_unwind_kretprobed_func_caller(u);
        unregister_kretprobe(&my_kretprobe);
        unwindme = NULL;
-       return u->ret;
+       if (u->flags & UWM_KRETPROBE_HANDLER)
+               ret = u->ret;
+       return ret;
 }
 
 static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
@@ -304,16 +327,13 @@ static noinline int unwindme_func4(struct unwindme *u)
                return 0;
        } else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) {
                return test_unwind_kprobe(u);
-       } else if (u->flags & (UWM_KRETPROBE)) {
+       } else if (u->flags & (UWM_KRETPROBE | UWM_KRETPROBE_HANDLER)) {
                return test_unwind_kretprobe(u);
        } else if (u->flags & UWM_FTRACE) {
                return test_unwind_ftrace(u);
        } else {
-               struct pt_regs regs;
+               struct pt_regs regs = fake_pt_regs();
 
-               memset(&regs, 0, sizeof(regs));
-               regs.psw.addr = get_psw_addr();
-               regs.gprs[15] = current_stack_pointer();
                return test_unwind(NULL,
                                   (u->flags & UWM_REGS) ? &regs : NULL,
                                   (u->flags & UWM_SP) ? u->sp : 0);
@@ -452,6 +472,10 @@ static const struct test_params param_list[] = {
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP),
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS),
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_REGS),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP | UWM_REGS),
 };
 
 /*
index 792f8e0..e563cb6 100644 (file)
@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
+                       zpci_zdev_get(zdev);
                        break;
                }
        }
@@ -399,7 +400,7 @@ EXPORT_SYMBOL(pci_iounmap);
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
 {
-       struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+       struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
 
        return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
 }
@@ -407,7 +408,7 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
 {
-       struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+       struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
 
        return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
 }
index e359d26..e96c986 100644 (file)
@@ -19,7 +19,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
 void zpci_release_device(struct kref *kref);
 static inline void zpci_zdev_put(struct zpci_dev *zdev)
 {
-       kref_put(&zdev->kref, zpci_release_device);
+       if (zdev)
+               kref_put(&zdev->kref, zpci_release_device);
 }
 
 static inline void zpci_zdev_get(struct zpci_dev *zdev)
@@ -32,8 +33,8 @@ void zpci_free_domain(int domain);
 int zpci_setup_bus_resources(struct zpci_dev *zdev,
                             struct list_head *resources);
 
-static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
-                                              unsigned int devfn)
+static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
+                                            unsigned int devfn)
 {
        struct zpci_bus *zbus = bus->sysdata;
 
index 63f3e05..1057d7a 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
+#include "pci_bus.h"
+
 bool zpci_unique_uid;
 
 void update_uid_checking(bool new)
@@ -404,8 +406,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
                return;
 
        zdev = get_zdev_by_fid(entry->fid);
-       if (!zdev)
-               zpci_create_device(entry->fid, entry->fh, entry->config_state);
+       if (zdev) {
+               zpci_zdev_put(zdev);
+               return;
+       }
+       zpci_create_device(entry->fid, entry->fh, entry->config_state);
 }
 
 int clp_scan_pci_devices(void)
index 2e3e5b2..ea9db5c 100644 (file)
@@ -269,7 +269,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
               pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 
        if (!pdev)
-               return;
+               goto no_pdev;
 
        switch (ccdf->pec) {
        case 0x003a: /* Service Action or Error Recovery Successful */
@@ -286,6 +286,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
                break;
        }
        pci_dev_put(pdev);
+no_pdev:
+       zpci_zdev_put(zdev);
 }
 
 void zpci_event_error(void *data)
@@ -314,6 +316,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
        struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+       bool existing_zdev = !!zdev;
        enum zpci_state state;
 
        zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
@@ -378,6 +381,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        default:
                break;
        }
+       if (existing_zdev)
+               zpci_zdev_put(zdev);
 }
 
 void zpci_event_availability(void *data)
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index d63f18d..8440c16 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index f2fe63b..320b09c 100644 (file)
@@ -75,6 +75,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
                -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
                -idirafter $(objtree)/include -D__KERNEL__ -D__UM_HOST__
 
+ifdef CONFIG_CC_IS_CLANG
+USER_CFLAGS := $(patsubst -mno-global-merge,,$(USER_CFLAGS))
+endif
+
 #This will adjust *FLAGS accordingly to the platform.
 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
 
index 6ead1e2..8ca67a6 100644 (file)
@@ -224,7 +224,7 @@ void mconsole_go(struct mc_request *req)
 
 void mconsole_stop(struct mc_request *req)
 {
-       deactivate_fd(req->originating_fd, MCONSOLE_IRQ);
+       block_signals();
        os_set_fd_block(req->originating_fd, 1);
        mconsole_reply(req, "stopped", 0, 0);
        for (;;) {
@@ -247,6 +247,7 @@ void mconsole_stop(struct mc_request *req)
        }
        os_set_fd_block(req->originating_fd, 0);
        mconsole_reply(req, "", 0, 0);
+       unblock_signals();
 }
 
 static DEFINE_SPINLOCK(mc_devices_lock);
index 5b5b64c..3c62ae8 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <errno.h>
 #include <termios.h>
 #include <unistd.h>
@@ -167,14 +168,29 @@ static void port_pre_exec(void *arg)
 int port_connection(int fd, int *socket, int *pid_out)
 {
        int new, err;
-       char *argv[] = { "/usr/sbin/in.telnetd", "-L",
+       char *env;
+       char *argv[] = { "in.telnetd", "-L",
                         OS_LIB_PATH "/uml/port-helper", NULL };
        struct port_pre_exec_data data;
 
+       if ((env = getenv("UML_PORT_HELPER")))
+               argv[2] = env;
+
        new = accept(fd, NULL, 0);
        if (new < 0)
                return -errno;
 
+       err = os_access(argv[2], X_OK);
+       if (err < 0) {
+               printk(UM_KERN_ERR "port_connection : error accessing port-helper "
+                      "executable at %s: %s\n", argv[2], strerror(-err));
+               if (env == NULL)
+                       printk(UM_KERN_ERR "Set UML_PORT_HELPER environment "
+                               "variable to path to uml-utilities port-helper "
+                               "binary\n");
+               goto out_close;
+       }
+
        err = os_pipe(socket, 0, 0);
        if (err < 0)
                goto out_close;
index 69d2d00..b03269f 100644 (file)
@@ -1526,13 +1526,19 @@ static void do_io(struct io_thread_req *req, struct io_desc *desc)
                        }
                        break;
                case REQ_OP_DISCARD:
-               case REQ_OP_WRITE_ZEROES:
                        n = os_falloc_punch(req->fds[bit], off, len);
                        if (n) {
                                req->error = map_error(-n);
                                return;
                        }
                        break;
+               case REQ_OP_WRITE_ZEROES:
+                       n = os_falloc_zeroes(req->fds[bit], off, len);
+                       if (n) {
+                               req->error = map_error(-n);
+                               return;
+                       }
+                       break;
                default:
                        WARN_ON_ONCE(1);
                        req->error = BLK_STS_NOTSUPP;
index 4fc1a5d..1d6f6a6 100644 (file)
@@ -67,6 +67,7 @@ static LIST_HEAD(vector_devices);
 static int driver_registered;
 
 static void vector_eth_configure(int n, struct arglist *def);
+static int vector_mmsg_rx(struct vector_private *vp, int budget);
 
 /* Argument accessors to set variables (and/or set default values)
  * mtu, buffer sizing, default headroom, etc
@@ -77,7 +78,6 @@ static void vector_eth_configure(int n, struct arglist *def);
 #define DEFAULT_VECTOR_SIZE 64
 #define TX_SMALL_PACKET 128
 #define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
-#define MAX_ITERATIONS 64
 
 static const struct {
        const char string[ETH_GSTRING_LEN];
@@ -458,7 +458,6 @@ static int vector_send(struct vector_queue *qi)
                                        vp->estats.tx_queue_running_average =
                                                (vp->estats.tx_queue_running_average + result) >> 1;
                                }
-                               netif_trans_update(qi->dev);
                                netif_wake_queue(qi->dev);
                                /* if TX is busy, break out of the send loop,
                                 *  poll write IRQ will reschedule xmit for us
@@ -470,8 +469,6 @@ static int vector_send(struct vector_queue *qi)
                        }
                }
                spin_unlock(&qi->head_lock);
-       } else {
-               tasklet_schedule(&vp->tx_poll);
        }
        return queue_depth;
 }
@@ -608,7 +605,7 @@ out_fail:
 
 /*
  * We do not use the RX queue as a proper wraparound queue for now
- * This is not necessary because the consumption via netif_rx()
+ * This is not necessary because the consumption via napi_gro_receive()
  * happens in-line. While we can try using the return code of
  * netif_rx() for flow control there are no drivers doing this today.
  * For this RX specific use we ignore the tail/head locks and
@@ -896,7 +893,7 @@ static int vector_legacy_rx(struct vector_private *vp)
                        skb->protocol = eth_type_trans(skb, skb->dev);
                        vp->dev->stats.rx_bytes += skb->len;
                        vp->dev->stats.rx_packets++;
-                       netif_rx(skb);
+                       napi_gro_receive(&vp->napi, skb);
                } else {
                        dev_kfree_skb_irq(skb);
                }
@@ -955,7 +952,7 @@ drop:
  * mmsg vector matched to an skb vector which we prepared earlier.
  */
 
-static int vector_mmsg_rx(struct vector_private *vp)
+static int vector_mmsg_rx(struct vector_private *vp, int budget)
 {
        int packet_count, i;
        struct vector_queue *qi = vp->rx_queue;
@@ -972,6 +969,9 @@ static int vector_mmsg_rx(struct vector_private *vp)
 
        /* Fire the Lazy Gun - get as many packets as we can in one go. */
 
+       if (budget > qi->max_depth)
+               budget = qi->max_depth;
+
        packet_count = uml_vector_recvmmsg(
                vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
 
@@ -1021,7 +1021,7 @@ static int vector_mmsg_rx(struct vector_private *vp)
                         */
                        vp->dev->stats.rx_bytes += skb->len;
                        vp->dev->stats.rx_packets++;
-                       netif_rx(skb);
+                       napi_gro_receive(&vp->napi, skb);
                } else {
                        /* Overlay header too short to do anything - discard.
                         * We can actually keep this skb and reuse it,
@@ -1044,23 +1044,6 @@ static int vector_mmsg_rx(struct vector_private *vp)
        return packet_count;
 }
 
-static void vector_rx(struct vector_private *vp)
-{
-       int err;
-       int iter = 0;
-
-       if ((vp->options & VECTOR_RX) > 0)
-               while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-                       iter++;
-       else
-               while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-                       iter++;
-       if ((err != 0) && net_ratelimit())
-               netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
-       if (iter == MAX_ITERATIONS)
-               netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
-}
-
 static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vector_private *vp = netdev_priv(dev);
@@ -1085,25 +1068,15 @@ static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_sent_queue(vp->dev, skb->len);
        queue_depth = vector_enqueue(vp->tx_queue, skb);
 
-       /* if the device queue is full, stop the upper layers and
-        * flush it.
-        */
-
-       if (queue_depth >= vp->tx_queue->max_depth - 1) {
-               vp->estats.tx_kicks++;
-               netif_stop_queue(dev);
-               vector_send(vp->tx_queue);
-               return NETDEV_TX_OK;
-       }
-       if (netdev_xmit_more()) {
+       if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
                mod_timer(&vp->tl, vp->coalesce);
                return NETDEV_TX_OK;
+       } else {
+               queue_depth = vector_send(vp->tx_queue);
+               if (queue_depth > 0)
+                       napi_schedule(&vp->napi);
        }
-       if (skb->len < TX_SMALL_PACKET) {
-               vp->estats.tx_kicks++;
-               vector_send(vp->tx_queue);
-       } else
-               tasklet_schedule(&vp->tx_poll);
+
        return NETDEV_TX_OK;
 }
 
@@ -1114,7 +1087,7 @@ static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
 
        if (!netif_running(dev))
                return IRQ_NONE;
-       vector_rx(vp);
+       napi_schedule(&vp->napi);
        return IRQ_HANDLED;
 
 }
@@ -1133,8 +1106,7 @@ static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
         * tweaking the IRQ mask less costly
         */
 
-       if (vp->in_write_poll)
-               tasklet_schedule(&vp->tx_poll);
+       napi_schedule(&vp->napi);
        return IRQ_HANDLED;
 
 }
@@ -1161,7 +1133,8 @@ static int vector_net_close(struct net_device *dev)
                um_free_irq(vp->tx_irq, dev);
                vp->tx_irq = 0;
        }
-       tasklet_kill(&vp->tx_poll);
+       napi_disable(&vp->napi);
+       netif_napi_del(&vp->napi);
        if (vp->fds->rx_fd > 0) {
                if (vp->bpf)
                        uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
@@ -1193,15 +1166,32 @@ static int vector_net_close(struct net_device *dev)
        return 0;
 }
 
-/* TX tasklet */
-
-static void vector_tx_poll(struct tasklet_struct *t)
+static int vector_poll(struct napi_struct *napi, int budget)
 {
-       struct vector_private *vp = from_tasklet(vp, t, tx_poll);
+       struct vector_private *vp = container_of(napi, struct vector_private, napi);
+       int work_done = 0;
+       int err;
+       bool tx_enqueued = false;
 
-       vp->estats.tx_kicks++;
-       vector_send(vp->tx_queue);
+       if ((vp->options & VECTOR_TX) != 0)
+               tx_enqueued = (vector_send(vp->tx_queue) > 0);
+       if ((vp->options & VECTOR_RX) > 0)
+               err = vector_mmsg_rx(vp, budget);
+       else {
+               err = vector_legacy_rx(vp);
+               if (err > 0)
+                       err = 1;
+       }
+       if (err > 0)
+               work_done += err;
+
+       if (tx_enqueued || err > 0)
+               napi_schedule(napi);
+       if (work_done < budget)
+               napi_complete_done(napi, work_done);
+       return work_done;
 }
+
 static void vector_reset_tx(struct work_struct *work)
 {
        struct vector_private *vp =
@@ -1265,6 +1255,9 @@ static int vector_net_open(struct net_device *dev)
                        goto out_close;
        }
 
+       netif_napi_add(vp->dev, &vp->napi, vector_poll, get_depth(vp->parsed));
+       napi_enable(&vp->napi);
+
        /* READ IRQ */
        err = um_request_irq(
                irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
@@ -1306,15 +1299,15 @@ static int vector_net_open(struct net_device *dev)
                uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
 
        netif_start_queue(dev);
+       vector_reset_stats(vp);
 
        /* clear buffer - it can happen that the host side of the interface
         * is full when we get here. In this case, new data is never queued,
         * SIGIOs never arrive, and the net never works.
         */
 
-       vector_rx(vp);
+       napi_schedule(&vp->napi);
 
-       vector_reset_stats(vp);
        vdevice = find_device(vp->unit);
        vdevice->opened = 1;
 
@@ -1543,15 +1536,16 @@ static const struct net_device_ops vector_netdev_ops = {
 #endif
 };
 
-
 static void vector_timer_expire(struct timer_list *t)
 {
        struct vector_private *vp = from_timer(vp, t, tl);
 
        vp->estats.tx_kicks++;
-       vector_send(vp->tx_queue);
+       napi_schedule(&vp->napi);
 }
 
+
+
 static void vector_eth_configure(
                int n,
                struct arglist *def
@@ -1634,7 +1628,6 @@ static void vector_eth_configure(
        });
 
        dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
-       tasklet_setup(&vp->tx_poll, vector_tx_poll);
        INIT_WORK(&vp->reset_tx, vector_reset_tx);
 
        timer_setup(&vp->tl, vector_timer_expire, 0);
index 8fff93a..2a1fa8e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+
 #include "vector_user.h"
 
 /* Queue structure specially adapted for multiple enqueue/dequeue
@@ -72,6 +73,7 @@ struct vector_private {
        struct list_head list;
        spinlock_t lock;
        struct net_device *dev;
+       struct napi_struct              napi    ____cacheline_aligned;
 
        int unit;
 
@@ -115,7 +117,6 @@ struct vector_private {
 
        spinlock_t stats_lock;
 
-       struct tasklet_struct tx_poll;
        bool rexmit_scheduled;
        bool opened;
        bool in_write_poll;
index e4ffeb9..c650e42 100644 (file)
@@ -771,7 +771,7 @@ int uml_vector_detach_bpf(int fd, void *bpf)
                printk(KERN_ERR BPF_DETACH_FAIL, prog->len, prog->filter, fd, -errno);
        return err;
 }
-void *uml_vector_default_bpf(void *mac)
+void *uml_vector_default_bpf(const void *mac)
 {
        struct sock_filter *bpf;
        uint32_t *mac1 = (uint32_t *)(mac + 2);
index d29d5fd..3a73d17 100644 (file)
@@ -97,7 +97,7 @@ extern int uml_vector_recvmmsg(
        unsigned int vlen,
        unsigned int flags
 );
-extern void *uml_vector_default_bpf(void *mac);
+extern void *uml_vector_default_bpf(const void *mac);
 extern void *uml_vector_user_bpf(char *filename);
 extern int uml_vector_attach_bpf(int fd, void *bpf);
 extern int uml_vector_detach_bpf(int fd, void *bpf);
index f512704..22b39de 100644 (file)
@@ -4,8 +4,10 @@
 
 #ifdef CONFIG_64BIT
 #undef CONFIG_X86_32
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_sse_pf64))
 #else
 #define CONFIG_X86_32 1
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_8regs))
 #endif
 
 #include <asm/cpufeature.h>
@@ -16,7 +18,7 @@
 #undef XOR_SELECT_TEMPLATE
 /* pick an arbitrary one - measuring isn't possible with inf-cpu */
 #define XOR_SELECT_TEMPLATE(x) \
-       (time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL)
+       (time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x))
 #endif
 
 #endif
index 0021405..fafde1d 100644 (file)
@@ -168,6 +168,7 @@ extern unsigned os_major(unsigned long long dev);
 extern unsigned os_minor(unsigned long long dev);
 extern unsigned long long os_makedev(unsigned major, unsigned minor);
 extern int os_falloc_punch(int fd, unsigned long long offset, int count);
+extern int os_falloc_zeroes(int fd, unsigned long long offset, int count);
 extern int os_eventfd(unsigned int initval, int flags);
 extern int os_sendmsg_fds(int fd, const void *buf, unsigned int len,
                          const int *fds, unsigned int fds_num);
index ca69d72..484141b 100644 (file)
@@ -25,8 +25,8 @@ void uml_dtb_init(void)
                return;
        }
 
-       unflatten_device_tree();
        early_init_fdt_scan_reserved_mem();
+       unflatten_device_tree();
 }
 
 static int __init uml_dtb_setup(char *line, int *add)
index e4421db..fc4450d 100644 (file)
@@ -625,6 +625,15 @@ int os_falloc_punch(int fd, unsigned long long offset, int len)
        return n;
 }
 
+int os_falloc_zeroes(int fd, unsigned long long offset, int len)
+{
+       int n = fallocate(fd, FALLOC_FL_ZERO_RANGE|FALLOC_FL_KEEP_SIZE, offset, len);
+
+       if (n < 0)
+               return -errno;
+       return n;
+}
+
 int os_eventfd(unsigned int initval, int flags)
 {
        int fd = eventfd(initval, flags);
index 32e88ba..b459745 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <stdlib.h>
+#include <string.h>
 #include <unistd.h>
 #include <errno.h>
 #include <sched.h>
@@ -99,6 +100,10 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
                CATCH_EINTR(waitpid(pid, NULL, __WALL));
        }
 
+       if (ret < 0)
+               printk(UM_KERN_ERR "run_helper : failed to exec %s on host: %s\n",
+                      argv[0], strerror(-ret));
+
 out_free2:
        kfree(data.buf);
 out_close:
index 6c5041c..4d5591d 100644 (file)
 
 static timer_t event_high_res_timer = 0;
 
-static inline long long timeval_to_ns(const struct timeval *tv)
-{
-       return ((long long) tv->tv_sec * UM_NSEC_PER_SEC) +
-               tv->tv_usec * UM_NSEC_PER_USEC;
-}
-
 static inline long long timespec_to_ns(const struct timespec *ts)
 {
        return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec;
index 7340d9f..ff45a27 100644 (file)
@@ -224,6 +224,7 @@ config X86
        select HAVE_KPROBES_ON_FTRACE
        select HAVE_FUNCTION_ERROR_INJECTION
        select HAVE_KRETPROBES
+       select HAVE_RETHOOK
        select HAVE_KVM
        select HAVE_LIVEPATCH                   if X86_64
        select HAVE_MIXED_BREAKPOINTS_REGS
index 946f74d..259383e 100644 (file)
@@ -172,7 +172,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
        # xor remaining bytes from partial register into output
        mov             %rcx,%rax
        and             $0xf,%rcx
-       jz              .Ldone8
+       jz              .Ldone2
        mov             %rax,%r9
        and             $~0xf,%r9
 
@@ -438,7 +438,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
        # xor remaining bytes from partial register into output
        mov             %rcx,%rax
        and             $0xf,%rcx
-       jz              .Ldone8
+       jz              .Ldone4
        mov             %rax,%r9
        and             $~0xf,%r9
 
index 71fae5a..2077ce7 100644 (file)
@@ -297,7 +297,7 @@ ___
 $code.=<<___;
        mov     \$1,%eax
 .Lno_key:
-       ret
+       RET
 ___
 &end_function("poly1305_init_x86_64");
 
@@ -373,7 +373,7 @@ $code.=<<___;
 .cfi_adjust_cfa_offset -48
 .Lno_data:
 .Lblocks_epilogue:
-       ret
+       RET
 .cfi_endproc
 ___
 &end_function("poly1305_blocks_x86_64");
@@ -399,7 +399,7 @@ $code.=<<___;
        mov     %rax,0($mac)    # write result
        mov     %rcx,8($mac)
 
-       ret
+       RET
 ___
 &end_function("poly1305_emit_x86_64");
 if ($avx) {
@@ -429,7 +429,7 @@ ___
        &poly1305_iteration();
 $code.=<<___;
        pop $ctx
-       ret
+       RET
 .size  __poly1305_block,.-__poly1305_block
 
 .type  __poly1305_init_avx,\@abi-omnipotent
@@ -594,7 +594,7 @@ __poly1305_init_avx:
 
        lea     -48-64($ctx),$ctx       # size [de-]optimization
        pop %rbp
-       ret
+       RET
 .size  __poly1305_init_avx,.-__poly1305_init_avx
 ___
 
@@ -747,7 +747,7 @@ $code.=<<___;
 .cfi_restore   %rbp
 .Lno_data_avx:
 .Lblocks_avx_epilogue:
-       ret
+       RET
 .cfi_endproc
 
 .align 32
@@ -1452,7 +1452,7 @@ $code.=<<___      if (!$win64);
 ___
 $code.=<<___;
        vzeroupper
-       ret
+       RET
 .cfi_endproc
 ___
 &end_function("poly1305_blocks_avx");
@@ -1508,7 +1508,7 @@ $code.=<<___;
        mov     %rax,0($mac)    # write result
        mov     %rcx,8($mac)
 
-       ret
+       RET
 ___
 &end_function("poly1305_emit_avx");
 
@@ -1675,7 +1675,7 @@ $code.=<<___;
 .cfi_restore   %rbp
 .Lno_data_avx2$suffix:
 .Lblocks_avx2_epilogue$suffix:
-       ret
+       RET
 .cfi_endproc
 
 .align 32
@@ -2201,7 +2201,7 @@ $code.=<<___      if (!$win64);
 ___
 $code.=<<___;
        vzeroupper
-       ret
+       RET
 .cfi_endproc
 ___
 if($avx > 2 && $avx512) {
@@ -2792,7 +2792,7 @@ $code.=<<___      if (!$win64);
 .cfi_def_cfa_register  %rsp
 ___
 $code.=<<___;
-       ret
+       RET
 .cfi_endproc
 ___
 
@@ -2893,7 +2893,7 @@ $code.=<<___      if ($flavour =~ /elf32/);
 ___
 $code.=<<___;
        mov     \$1,%eax
-       ret
+       RET
 .size  poly1305_init_base2_44,.-poly1305_init_base2_44
 ___
 {
@@ -3010,7 +3010,7 @@ poly1305_blocks_vpmadd52:
        jnz             .Lblocks_vpmadd52_4x
 
 .Lno_data_vpmadd52:
-       ret
+       RET
 .size  poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
 ___
 }
@@ -3451,7 +3451,7 @@ poly1305_blocks_vpmadd52_4x:
        vzeroall
 
 .Lno_data_vpmadd52_4x:
-       ret
+       RET
 .size  poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
 ___
 }
@@ -3824,7 +3824,7 @@ $code.=<<___;
        vzeroall
 
 .Lno_data_vpmadd52_8x:
-       ret
+       RET
 .size  poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
 ___
 }
@@ -3861,7 +3861,7 @@ poly1305_emit_base2_44:
        mov     %rax,0($mac)    # write result
        mov     %rcx,8($mac)
 
-       ret
+       RET
 .size  poly1305_emit_base2_44,.-poly1305_emit_base2_44
 ___
 }      }       }
@@ -3916,7 +3916,7 @@ xor128_encrypt_n_pad:
 
 .Ldone_enc:
        mov     $otp,%rax
-       ret
+       RET
 .size  xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
 
 .globl xor128_decrypt_n_pad
@@ -3967,7 +3967,7 @@ xor128_decrypt_n_pad:
 
 .Ldone_dec:
        mov     $otp,%rax
-       ret
+       RET
 .size  xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
 ___
 }
@@ -4109,7 +4109,7 @@ avx_handler:
        pop     %rbx
        pop     %rdi
        pop     %rsi
-       ret
+       RET
 .size  avx_handler,.-avx_handler
 
 .section       .pdata
index 71e6aae..b12b9ef 100644 (file)
@@ -513,5 +513,5 @@ SYM_FUNC_START(sm3_transform_avx)
 
        movq %rbp, %rsp;
        popq %rbp;
-       ret;
+       RET;
 SYM_FUNC_END(sm3_transform_avx)
index 7f3886e..eca5d6e 100644 (file)
@@ -3,8 +3,7 @@ out := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
-         $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+$(shell mkdir -p $(out) $(uapi))
 
 syscall32 := $(src)/syscall_32.tbl
 syscall64 := $(src)/syscall_64.tbl
index 2a1f873..7cede4d 100644 (file)
@@ -4,7 +4,7 @@
 
 #include <linux/sched.h>
 #include <linux/ftrace.h>
-#include <linux/kprobes.h>
+#include <linux/rethook.h>
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
@@ -16,7 +16,7 @@ struct unwind_state {
        unsigned long stack_mask;
        struct task_struct *task;
        int graph_idx;
-#ifdef CONFIG_KRETPROBES
+#if defined(CONFIG_RETHOOK)
        struct llist_node *kr_cur;
 #endif
        bool error;
@@ -104,19 +104,18 @@ void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
 #endif
 
 static inline
-unsigned long unwind_recover_kretprobe(struct unwind_state *state,
-                                      unsigned long addr, unsigned long *addr_p)
+unsigned long unwind_recover_rethook(struct unwind_state *state,
+                                    unsigned long addr, unsigned long *addr_p)
 {
-#ifdef CONFIG_KRETPROBES
-       return is_kretprobe_trampoline(addr) ?
-               kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) :
-               addr;
-#else
-       return addr;
+#ifdef CONFIG_RETHOOK
+       if (is_rethook_trampoline(addr))
+               return rethook_find_ret_addr(state->task, (unsigned long)addr_p,
+                                            &state->kr_cur);
 #endif
+       return addr;
 }
 
-/* Recover the return address modified by kretprobe and ftrace_graph. */
+/* Recover the return address modified by rethook and ftrace_graph. */
 static inline
 unsigned long unwind_recover_ret_addr(struct unwind_state *state,
                                     unsigned long addr, unsigned long *addr_p)
@@ -125,7 +124,7 @@ unsigned long unwind_recover_ret_addr(struct unwind_state *state,
 
        ret = ftrace_graph_ret_addr(state->task, &state->graph_idx,
                                    addr, addr_p);
-       return unwind_recover_kretprobe(state, ret, addr_p);
+       return unwind_recover_rethook(state, ret, addr_p);
 }
 
 /*
index 6462e3d..c41ef42 100644 (file)
@@ -103,6 +103,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)  += ftrace.o
 obj-$(CONFIG_X86_TSC)          += trace_clock.o
 obj-$(CONFIG_TRACING)          += trace.o
+obj-$(CONFIG_RETHOOK)          += rethook.o
 obj-$(CONFIG_CRASH_CORE)       += crash_core_$(BITS).o
 obj-$(CONFIG_KEXEC_CORE)       += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC_CORE)       += relocate_kernel_$(BITS).o crash.o
index 7d3a2e2..c993521 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <asm/asm.h>
 #include <asm/frame.h>
+#include <asm/insn.h>
 
 #ifdef CONFIG_X86_64
 
index 8ef933c..7c4ab88 100644 (file)
@@ -811,18 +811,6 @@ set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                = (regs->flags & X86_EFLAGS_IF);
 }
 
-void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
-       unsigned long *sara = stack_addr(regs);
-
-       ri->ret_addr = (kprobe_opcode_t *) *sara;
-       ri->fp = sara;
-
-       /* Replace the return addr with trampoline addr */
-       *sara = (unsigned long) &__kretprobe_trampoline;
-}
-NOKPROBE_SYMBOL(arch_prepare_kretprobe);
-
 static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
                               struct kprobe_ctlblk *kcb)
 {
@@ -1023,101 +1011,6 @@ int kprobe_int3_handler(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(kprobe_int3_handler);
 
-/*
- * When a retprobed function returns, this code saves registers and
- * calls trampoline_handler() runs, which calls the kretprobe's handler.
- */
-asm(
-       ".text\n"
-       ".global __kretprobe_trampoline\n"
-       ".type __kretprobe_trampoline, @function\n"
-       "__kretprobe_trampoline:\n"
-#ifdef CONFIG_X86_64
-       ANNOTATE_NOENDBR
-       /* Push a fake return address to tell the unwinder it's a kretprobe. */
-       "       pushq $__kretprobe_trampoline\n"
-       UNWIND_HINT_FUNC
-       /* Save the 'sp - 8', this will be fixed later. */
-       "       pushq %rsp\n"
-       "       pushfq\n"
-       SAVE_REGS_STRING
-       "       movq %rsp, %rdi\n"
-       "       call trampoline_handler\n"
-       RESTORE_REGS_STRING
-       /* In trampoline_handler(), 'regs->flags' is copied to 'regs->sp'. */
-       "       addq $8, %rsp\n"
-       "       popfq\n"
-#else
-       /* Push a fake return address to tell the unwinder it's a kretprobe. */
-       "       pushl $__kretprobe_trampoline\n"
-       UNWIND_HINT_FUNC
-       /* Save the 'sp - 4', this will be fixed later. */
-       "       pushl %esp\n"
-       "       pushfl\n"
-       SAVE_REGS_STRING
-       "       movl %esp, %eax\n"
-       "       call trampoline_handler\n"
-       RESTORE_REGS_STRING
-       /* In trampoline_handler(), 'regs->flags' is copied to 'regs->sp'. */
-       "       addl $4, %esp\n"
-       "       popfl\n"
-#endif
-       ASM_RET
-       ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"
-);
-NOKPROBE_SYMBOL(__kretprobe_trampoline);
-/*
- * __kretprobe_trampoline() skips updating frame pointer. The frame pointer
- * saved in trampoline_handler() points to the real caller function's
- * frame pointer. Thus the __kretprobe_trampoline() doesn't have a
- * standard stack frame with CONFIG_FRAME_POINTER=y.
- * Let's mark it non-standard function. Anyway, FP unwinder can correctly
- * unwind without the hint.
- */
-STACK_FRAME_NON_STANDARD_FP(__kretprobe_trampoline);
-
-/* This is called from kretprobe_trampoline_handler(). */
-void arch_kretprobe_fixup_return(struct pt_regs *regs,
-                                kprobe_opcode_t *correct_ret_addr)
-{
-       unsigned long *frame_pointer = &regs->sp + 1;
-
-       /* Replace fake return address with real one. */
-       *frame_pointer = (unsigned long)correct_ret_addr;
-}
-
-/*
- * Called from __kretprobe_trampoline
- */
-__used __visible void trampoline_handler(struct pt_regs *regs)
-{
-       unsigned long *frame_pointer;
-
-       /* fixup registers */
-       regs->cs = __KERNEL_CS;
-#ifdef CONFIG_X86_32
-       regs->gs = 0;
-#endif
-       regs->ip = (unsigned long)&__kretprobe_trampoline;
-       regs->orig_ax = ~0UL;
-       regs->sp += sizeof(long);
-       frame_pointer = &regs->sp + 1;
-
-       /*
-        * The return address at 'frame_pointer' is recovered by the
-        * arch_kretprobe_fixup_return() which called from the
-        * kretprobe_trampoline_handler().
-        */
-       kretprobe_trampoline_handler(regs, frame_pointer);
-
-       /*
-        * Copy FLAGS to 'pt_regs::sp' so that __kretprobe_trapmoline()
-        * can do RET right after POPF.
-        */
-       regs->sp = regs->flags;
-}
-NOKPROBE_SYMBOL(trampoline_handler);
-
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
        struct kprobe *cur = kprobe_running();
index b4a54a5..e6b8c53 100644 (file)
@@ -106,7 +106,8 @@ asm (
                        ".global optprobe_template_entry\n"
                        "optprobe_template_entry:\n"
 #ifdef CONFIG_X86_64
-                       /* We don't bother saving the ss register */
+                       "       pushq $" __stringify(__KERNEL_DS) "\n"
+                       /* Save the 'sp - 8', this will be fixed later. */
                        "       pushq %rsp\n"
                        "       pushfq\n"
                        ".global optprobe_template_clac\n"
@@ -121,14 +122,17 @@ asm (
                        ".global optprobe_template_call\n"
                        "optprobe_template_call:\n"
                        ASM_NOP5
-                       /* Move flags to rsp */
+                       /* Copy 'regs->flags' into 'regs->ss'. */
                        "       movq 18*8(%rsp), %rdx\n"
-                       "       movq %rdx, 19*8(%rsp)\n"
+                       "       movq %rdx, 20*8(%rsp)\n"
                        RESTORE_REGS_STRING
-                       /* Skip flags entry */
-                       "       addq $8, %rsp\n"
+                       /* Skip 'regs->flags' and 'regs->sp'. */
+                       "       addq $16, %rsp\n"
+                       /* And pop flags register from 'regs->ss'. */
                        "       popfq\n"
 #else /* CONFIG_X86_32 */
+                       "       pushl %ss\n"
+                       /* Save the 'sp - 4', this will be fixed later. */
                        "       pushl %esp\n"
                        "       pushfl\n"
                        ".global optprobe_template_clac\n"
@@ -142,12 +146,13 @@ asm (
                        ".global optprobe_template_call\n"
                        "optprobe_template_call:\n"
                        ASM_NOP5
-                       /* Move flags into esp */
+                       /* Copy 'regs->flags' into 'regs->ss'. */
                        "       movl 14*4(%esp), %edx\n"
-                       "       movl %edx, 15*4(%esp)\n"
+                       "       movl %edx, 16*4(%esp)\n"
                        RESTORE_REGS_STRING
-                       /* Skip flags entry */
-                       "       addl $4, %esp\n"
+                       /* Skip 'regs->flags' and 'regs->sp'. */
+                       "       addl $8, %esp\n"
+                       /* And pop flags register from 'regs->ss'. */
                        "       popfl\n"
 #endif
                        ".global optprobe_template_end\n"
@@ -179,6 +184,8 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+               /* Adjust stack pointer */
+               regs->sp += sizeof(long);
                /* Save skipped registers */
                regs->cs = __KERNEL_CS;
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/rethook.c b/arch/x86/kernel/rethook.c
new file mode 100644 (file)
index 0000000..8a1c011
--- /dev/null
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * x86 implementation of rethook. Mostly copied from arch/x86/kernel/kprobes/core.c.
+ */
+#include <linux/bug.h>
+#include <linux/rethook.h>
+#include <linux/kprobes.h>
+#include <linux/objtool.h>
+
+#include "kprobes/common.h"
+
+__visible void arch_rethook_trampoline_callback(struct pt_regs *regs);
+
+#ifndef ANNOTATE_NOENDBR
+#define ANNOTATE_NOENDBR
+#endif
+
+/*
+ * When a target function returns, this code saves registers and calls
+ * arch_rethook_trampoline_callback(), which calls the rethook handler.
+ */
+asm(
+       ".text\n"
+       ".global arch_rethook_trampoline\n"
+       ".type arch_rethook_trampoline, @function\n"
+       "arch_rethook_trampoline:\n"
+#ifdef CONFIG_X86_64
+       ANNOTATE_NOENDBR        /* This is only jumped from ret instruction */
+       /* Push a fake return address to tell the unwinder it's a rethook. */
+       "       pushq $arch_rethook_trampoline\n"
+       UNWIND_HINT_FUNC
+       "       pushq $" __stringify(__KERNEL_DS) "\n"
+       /* Save the 'sp - 16', this will be fixed later. */
+       "       pushq %rsp\n"
+       "       pushfq\n"
+       SAVE_REGS_STRING
+       "       movq %rsp, %rdi\n"
+       "       call arch_rethook_trampoline_callback\n"
+       RESTORE_REGS_STRING
+       /* In the callback function, 'regs->flags' is copied to 'regs->ss'. */
+       "       addq $16, %rsp\n"
+       "       popfq\n"
+#else
+       /* Push a fake return address to tell the unwinder it's a rethook. */
+       "       pushl $arch_rethook_trampoline\n"
+       UNWIND_HINT_FUNC
+       "       pushl %ss\n"
+       /* Save the 'sp - 8', this will be fixed later. */
+       "       pushl %esp\n"
+       "       pushfl\n"
+       SAVE_REGS_STRING
+       "       movl %esp, %eax\n"
+       "       call arch_rethook_trampoline_callback\n"
+       RESTORE_REGS_STRING
+       /* In the callback function, 'regs->flags' is copied to 'regs->ss'. */
+       "       addl $8, %esp\n"
+       "       popfl\n"
+#endif
+       ASM_RET
+       ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n"
+);
+NOKPROBE_SYMBOL(arch_rethook_trampoline);
+
+/*
+ * Called from arch_rethook_trampoline
+ */
+__used __visible void arch_rethook_trampoline_callback(struct pt_regs *regs)
+{
+       unsigned long *frame_pointer;
+
+       /* fixup registers */
+       regs->cs = __KERNEL_CS;
+#ifdef CONFIG_X86_32
+       regs->gs = 0;
+#endif
+       regs->ip = (unsigned long)&arch_rethook_trampoline;
+       regs->orig_ax = ~0UL;
+       regs->sp += 2*sizeof(long);
+       frame_pointer = (long *)(regs + 1);
+
+       /*
+        * The return address at 'frame_pointer' is recovered by the
+        * arch_rethook_fixup_return() which called from this
+        * rethook_trampoline_handler().
+        */
+       rethook_trampoline_handler(regs, (unsigned long)frame_pointer);
+
+       /*
+        * Copy FLAGS to 'pt_regs::ss' so that arch_rethook_trapmoline()
+        * can do RET right after POPF.
+        */
+       *(unsigned long *)&regs->ss = regs->flags;
+}
+NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
+
+/*
+ * arch_rethook_trampoline() skips updating frame pointer. The frame pointer
+ * saved in arch_rethook_trampoline_callback() points to the real caller
+ * function's frame pointer. Thus the arch_rethook_trampoline() doesn't have
+ * a standard stack frame with CONFIG_FRAME_POINTER=y.
+ * Let's mark it non-standard function. Anyway, FP unwinder can correctly
+ * unwind without the hint.
+ */
+STACK_FRAME_NON_STANDARD_FP(arch_rethook_trampoline);
+
+/* This is called from rethook_trampoline_handler(). */
+void arch_rethook_fixup_return(struct pt_regs *regs,
+                              unsigned long correct_ret_addr)
+{
+       unsigned long *frame_pointer = (void *)(regs + 1);
+
+       /* Replace fake return address with real one. */
+       *frame_pointer = correct_ret_addr;
+}
+NOKPROBE_SYMBOL(arch_rethook_fixup_return);
+
+void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount)
+{
+       unsigned long *stack = (unsigned long *)regs->sp;
+
+       rh->ret_addr = stack[0];
+       rh->frame = regs->sp;
+
+       /* Replace the return addr with trampoline addr */
+       stack[0] = (unsigned long) arch_rethook_trampoline;
+}
+NOKPROBE_SYMBOL(arch_rethook_prepare);
index 2de3c8c..794fdef 100644 (file)
@@ -550,15 +550,15 @@ bool unwind_next_frame(struct unwind_state *state)
                }
                /*
                 * There is a small chance to interrupt at the entry of
-                * __kretprobe_trampoline() where the ORC info doesn't exist.
-                * That point is right after the RET to __kretprobe_trampoline()
+                * arch_rethook_trampoline() where the ORC info doesn't exist.
+                * That point is right after the RET to arch_rethook_trampoline()
                 * which was modified return address.
-                * At that point, the @addr_p of the unwind_recover_kretprobe()
+                * At that point, the @addr_p of the unwind_recover_rethook()
                 * (this has to point the address of the stack entry storing
                 * the modified return address) must be "SP - (a stack entry)"
                 * because SP is incremented by the RET.
                 */
-               state->ip = unwind_recover_kretprobe(state, state->ip,
+               state->ip = unwind_recover_rethook(state, state->ip,
                                (unsigned long *)(state->sp - sizeof(long)));
                state->regs = (struct pt_regs *)sp;
                state->prev_regs = NULL;
@@ -573,7 +573,7 @@ bool unwind_next_frame(struct unwind_state *state)
                        goto err;
                }
                /* See UNWIND_HINT_TYPE_REGS case comment. */
-               state->ip = unwind_recover_kretprobe(state, state->ip,
+               state->ip = unwind_recover_rethook(state, state->ip,
                                (unsigned long *)(state->sp - sizeof(long)));
 
                if (state->full_regs)
index 48d6cd1..b6b9972 100644 (file)
 #include <linux/msg.h>
 #include <linux/shm.h>
 
-typedef long syscall_handler_t(void);
+typedef long syscall_handler_t(long, long, long, long, long, long);
 
 extern syscall_handler_t *sys_call_table[];
 
 #define EXECUTE_SYSCALL(syscall, regs) \
-       (((long (*)(long, long, long, long, long, long)) \
-         (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+       (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
                                      UPT_SYSCALL_ARG2(&regs->regs), \
                                      UPT_SYSCALL_ARG3(&regs->regs), \
                                      UPT_SYSCALL_ARG4(&regs->regs), \
index fe5323f..27b29ae 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/prctl.h> /* XXX This should get the constants from libc */
 #include <registers.h>
 #include <os.h>
-#include <registers.h>
 
 long arch_prctl(struct task_struct *task, int option,
                unsigned long __user *arg2)
index 6713c65..b265e4b 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 3ea7fe6..d8443cf 100644 (file)
@@ -13,34 +13,20 @@ obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
 endif
 
 quiet_cmd_extract_certs  = CERT    $@
-      cmd_extract_certs  = $(obj)/extract-cert $(2) $@
+      cmd_extract_certs  = $(obj)/extract-cert $(extract-cert-in) $@
+extract-cert-in = $(or $(filter-out $(obj)/extract-cert, $(real-prereqs)),"")
 
 $(obj)/system_certificates.o: $(obj)/x509_certificate_list
 
 $(obj)/x509_certificate_list: $(CONFIG_SYSTEM_TRUSTED_KEYS) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_TRUSTED_KEYS),$<,""))
+       $(call if_changed,extract_certs)
 
 targets += x509_certificate_list
 
-ifeq ($(CONFIG_MODULE_SIG),y)
-       SIGN_KEY = y
-endif
-
-ifeq ($(CONFIG_IMA_APPRAISE_MODSIG),y)
-ifeq ($(CONFIG_MODULES),y)
-       SIGN_KEY = y
-endif
-endif
-
-ifdef SIGN_KEY
-###############################################################################
-#
 # If module signing is requested, say by allyesconfig, but a key has not been
 # supplied, then one will need to be generated to make sure the build does not
 # fail and that the kernel may be used afterwards.
 #
-###############################################################################
-
 # We do it this way rather than having a boolean option for enabling an
 # external private key, because 'make randconfig' might enable such a
 # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
@@ -67,23 +53,22 @@ $(obj)/x509.genkey:
 
 endif # CONFIG_MODULE_SIG_KEY
 
-# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it
-ifneq ($(filter-out pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
-X509_DEP := $(CONFIG_MODULE_SIG_KEY)
-endif
-
 $(obj)/system_certificates.o: $(obj)/signing_key.x509
 
-$(obj)/signing_key.x509: $(X509_DEP) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_MODULE_SIG_KEY),$(if $(X509_DEP),$<,$(CONFIG_MODULE_SIG_KEY)),""))
-endif # CONFIG_MODULE_SIG
+PKCS11_URI := $(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY))
+ifdef PKCS11_URI
+$(obj)/signing_key.x509: extract-cert-in := $(PKCS11_URI)
+endif
+
+$(obj)/signing_key.x509: $(filter-out $(PKCS11_URI),$(CONFIG_MODULE_SIG_KEY)) $(obj)/extract-cert FORCE
+       $(call if_changed,extract_certs)
 
 targets += signing_key.x509
 
 $(obj)/revocation_certificates.o: $(obj)/x509_revocation_list
 
 $(obj)/x509_revocation_list: $(CONFIG_SYSTEM_REVOCATION_KEYS) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_REVOCATION_KEYS),$<,""))
+       $(call if_changed,extract_certs)
 
 targets += x509_revocation_list
 
index e1645e6..003e25d 100644 (file)
@@ -9,10 +9,7 @@
 system_certificate_list:
 __cert_list_start:
 __module_cert_start:
-#if defined(CONFIG_MODULE_SIG) || (defined(CONFIG_IMA_APPRAISE_MODSIG) \
-                              && defined(CONFIG_MODULES))
        .incbin "certs/signing_key.x509"
-#endif
 __module_cert_end:
        .incbin "certs/x509_certificate_list"
 __cert_list_end:
index a5fe292..0555f68 100644 (file)
@@ -353,29 +353,27 @@ static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
                               struct acpi_ipmi_msg *msg)
 {
-       struct acpi_ipmi_msg *tx_msg, *temp;
-       bool msg_found = false;
+       struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
        unsigned long flags;
 
        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
-       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
-               if (msg == tx_msg) {
-                       msg_found = true;
-                       list_del(&tx_msg->head);
+       list_for_each_entry_safe(iter, temp, &ipmi->tx_msg_list, head) {
+               if (msg == iter) {
+                       tx_msg = iter;
+                       list_del(&iter->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
 
-       if (msg_found)
+       if (tx_msg)
                acpi_ipmi_msg_put(tx_msg);
 }
 
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 {
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
-       bool msg_found = false;
-       struct acpi_ipmi_msg *tx_msg, *temp;
+       struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
        struct device *dev = ipmi_device->dev;
        unsigned long flags;
 
@@ -387,16 +385,16 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
        }
 
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-       list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
-               if (msg->msgid == tx_msg->tx_msgid) {
-                       msg_found = true;
-                       list_del(&tx_msg->head);
+       list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
+               if (msg->msgid == iter->tx_msgid) {
+                       tx_msg = iter;
+                       list_del(&iter->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 
-       if (!msg_found) {
+       if (!tx_msg) {
                dev_warn(dev,
                         "Unexpected response (msg id %ld) is returned.\n",
                         msg->msgid);
@@ -482,15 +480,14 @@ err_ref:
 
 static void ipmi_bmc_gone(int iface)
 {
-       struct acpi_ipmi_device *ipmi_device, *temp;
-       bool dev_found = false;
+       struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp;
 
        mutex_lock(&driver_data.ipmi_lock);
-       list_for_each_entry_safe(ipmi_device, temp,
+       list_for_each_entry_safe(iter, temp,
                                 &driver_data.ipmi_devices, head) {
-               if (ipmi_device->ipmi_ifnum != iface) {
-                       dev_found = true;
-                       __ipmi_dev_kill(ipmi_device);
+               if (iter->ipmi_ifnum != iface) {
+                       ipmi_device = iter;
+                       __ipmi_dev_kill(iter);
                        break;
                }
        }
@@ -500,7 +497,7 @@ static void ipmi_bmc_gone(int iface)
                                        struct acpi_ipmi_device, head);
        mutex_unlock(&driver_data.ipmi_lock);
 
-       if (dev_found) {
+       if (ipmi_device) {
                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);
        }
index c7fdb12..33b7fbb 100644 (file)
@@ -319,7 +319,7 @@ repeat:
        if (res_ins)
                list_add(&res_ins->list, res_list);
        else {
-               res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+               res_ins = kmalloc(sizeof(*res_ins), GFP_KERNEL);
                if (!res_ins)
                        return -ENOMEM;
                res_ins->start = start;
index d418449..bc14547 100644 (file)
@@ -654,7 +654,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        unsigned int num_ent, i, cpc_rev;
        int pcc_subspace_id = -1;
        acpi_status status;
-       int ret = -EFAULT;
+       int ret = -ENODATA;
 
        if (osc_sb_cppc_not_supported)
                return -ENODEV;
@@ -679,9 +679,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        cpc_obj = &out_obj->package.elements[0];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                num_ent = cpc_obj->integer.value;
+               if (num_ent <= 1) {
+                       pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
+                                num_ent, pr->id);
+                       goto out_free;
+               }
        } else {
-               pr_debug("Unexpected entry type(%d) for NumEntries\n",
-                               cpc_obj->type);
+               pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
+                        cpc_obj->type, pr->id);
                goto out_free;
        }
        cpc_ptr->num_entries = num_ent;
@@ -691,8 +696,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                cpc_rev = cpc_obj->integer.value;
        } else {
-               pr_debug("Unexpected entry type(%d) for Revision\n",
-                               cpc_obj->type);
+               pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
+                        cpc_obj->type, pr->id);
                goto out_free;
        }
        cpc_ptr->version = cpc_rev;
@@ -723,7 +728,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                        if (pcc_data_alloc(pcc_subspace_id))
                                                goto out_free;
                                } else if (pcc_subspace_id != gas_t->access_width) {
-                                       pr_debug("Mismatched PCC ids.\n");
+                                       pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
+                                                pr->id);
                                        goto out_free;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -742,20 +748,21 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                         * SystemIO doesn't implement 64-bit
                                         * registers.
                                         */
-                                       pr_debug("Invalid access width %d for SystemIO register\n",
-                                               gas_t->access_width);
+                                       pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
+                                                gas_t->access_width);
                                        goto out_free;
                                }
                                if (gas_t->address & OVER_16BTS_MASK) {
                                        /* SystemIO registers use 16-bit integer addresses */
-                                       pr_debug("Invalid IO port %llu for SystemIO register\n",
-                                               gas_t->address);
+                                       pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
+                                                gas_t->address);
                                        goto out_free;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
                                        /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
-                                       pr_debug("Unsupported register type: %d\n", gas_t->space_id);
+                                       pr_debug("Unsupported register type (%d) in _CPC\n",
+                                                gas_t->space_id);
                                        goto out_free;
                                }
                        }
@@ -763,7 +770,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
                } else {
-                       pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
+                       pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
+                                i, pr->id);
                        goto out_free;
                }
        }
index e5d7f2b..fe61f61 100644 (file)
@@ -999,80 +999,6 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc,
        return table + hdr->length;
 }
 
-static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
-               struct nfit_mem *nfit_mem)
-{
-       u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
-       u16 dcr = nfit_mem->dcr->region_index;
-       struct nfit_spa *nfit_spa;
-
-       list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
-               u16 range_index = nfit_spa->spa->range_index;
-               int type = nfit_spa_type(nfit_spa->spa);
-               struct nfit_memdev *nfit_memdev;
-
-               if (type != NFIT_SPA_BDW)
-                       continue;
-
-               list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
-                       if (nfit_memdev->memdev->range_index != range_index)
-                               continue;
-                       if (nfit_memdev->memdev->device_handle != device_handle)
-                               continue;
-                       if (nfit_memdev->memdev->region_index != dcr)
-                               continue;
-
-                       nfit_mem->spa_bdw = nfit_spa->spa;
-                       return;
-               }
-       }
-
-       dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
-                       nfit_mem->spa_dcr->range_index);
-       nfit_mem->bdw = NULL;
-}
-
-static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
-               struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
-{
-       u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
-       struct nfit_memdev *nfit_memdev;
-       struct nfit_bdw *nfit_bdw;
-       struct nfit_idt *nfit_idt;
-       u16 idt_idx, range_index;
-
-       list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
-               if (nfit_bdw->bdw->region_index != dcr)
-                       continue;
-               nfit_mem->bdw = nfit_bdw->bdw;
-               break;
-       }
-
-       if (!nfit_mem->bdw)
-               return;
-
-       nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
-
-       if (!nfit_mem->spa_bdw)
-               return;
-
-       range_index = nfit_mem->spa_bdw->range_index;
-       list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
-               if (nfit_memdev->memdev->range_index != range_index ||
-                               nfit_memdev->memdev->region_index != dcr)
-                       continue;
-               nfit_mem->memdev_bdw = nfit_memdev->memdev;
-               idt_idx = nfit_memdev->memdev->interleave_index;
-               list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
-                       if (nfit_idt->idt->interleave_index != idt_idx)
-                               continue;
-                       nfit_mem->idt_bdw = nfit_idt->idt;
-                       break;
-               }
-               break;
-       }
-}
-
 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_system_address *spa)
 {
@@ -1189,7 +1115,6 @@ static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
                                nfit_mem->idt_dcr = nfit_idt->idt;
                                break;
                        }
-                       nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
                } else if (type == NFIT_SPA_PM) {
                        /*
                         * A single dimm may belong to multiple SPA-PM
@@ -1532,8 +1457,6 @@ static int num_nvdimm_formats(struct nvdimm *nvdimm)
 
        if (nfit_mem->memdev_pmem)
                formats++;
-       if (nfit_mem->memdev_bdw)
-               formats++;
        return formats;
 }
 
@@ -2079,11 +2002,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
                        continue;
                }
 
-               if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
-                       set_bit(NDD_ALIASING, &flags);
-                       set_bit(NDD_LABELING, &flags);
-               }
-
                /* collate flags across all memdevs for this dimm */
                list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                        struct acpi_nfit_memory_map *dimm_memdev;
@@ -2118,10 +2036,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
                        cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
                }
 
-               /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
-               if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
-                       set_bit(NDD_NOBLK, &flags);
-
                if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
                        set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
                        set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
@@ -2429,272 +2343,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
        return 0;
 }
 
-static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
-{
-       struct acpi_nfit_interleave *idt = mmio->idt;
-       u32 sub_line_offset, line_index, line_offset;
-       u64 line_no, table_skip_count, table_offset;
-
-       line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
-       table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
-       line_offset = idt->line_offset[line_index]
-               * mmio->line_size;
-       table_offset = table_skip_count * mmio->table_size;
-
-       return mmio->base_offset + line_offset + table_offset + sub_line_offset;
-}
-
-static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
-{
-       struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
-       u64 offset = nfit_blk->stat_offset + mmio->size * bw;
-       const u32 STATUS_MASK = 0x80000037;
-
-       if (mmio->num_lines)
-               offset = to_interleave_offset(offset, mmio);
-
-       return readl(mmio->addr.base + offset) & STATUS_MASK;
-}
-
-static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
-               resource_size_t dpa, unsigned int len, unsigned int write)
-{
-       u64 cmd, offset;
-       struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
-
-       enum {
-               BCW_OFFSET_MASK = (1ULL << 48)-1,
-               BCW_LEN_SHIFT = 48,
-               BCW_LEN_MASK = (1ULL << 8) - 1,
-               BCW_CMD_SHIFT = 56,
-       };
-
-       cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
-       len = len >> L1_CACHE_SHIFT;
-       cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
-       cmd |= ((u64) write) << BCW_CMD_SHIFT;
-
-       offset = nfit_blk->cmd_offset + mmio->size * bw;
-       if (mmio->num_lines)
-               offset = to_interleave_offset(offset, mmio);
-
-       writeq(cmd, mmio->addr.base + offset);
-       nvdimm_flush(nfit_blk->nd_region, NULL);
-
-       if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
-               readq(mmio->addr.base + offset);
-}
-
-static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
-               resource_size_t dpa, void *iobuf, size_t len, int rw,
-               unsigned int lane)
-{
-       struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
-       unsigned int copied = 0;
-       u64 base_offset;
-       int rc;
-
-       base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
-               + lane * mmio->size;
-       write_blk_ctl(nfit_blk, lane, dpa, len, rw);
-       while (len) {
-               unsigned int c;
-               u64 offset;
-
-               if (mmio->num_lines) {
-                       u32 line_offset;
-
-                       offset = to_interleave_offset(base_offset + copied,
-                                       mmio);
-                       div_u64_rem(offset, mmio->line_size, &line_offset);
-                       c = min_t(size_t, len, mmio->line_size - line_offset);
-               } else {
-                       offset = base_offset + nfit_blk->bdw_offset;
-                       c = len;
-               }
-
-               if (rw)
-                       memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
-               else {
-                       if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
-                               arch_invalidate_pmem((void __force *)
-                                       mmio->addr.aperture + offset, c);
-
-                       memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
-               }
-
-               copied += c;
-               len -= c;
-       }
-
-       if (rw)
-               nvdimm_flush(nfit_blk->nd_region, NULL);
-
-       rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
-       return rc;
-}
-
-static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
-               resource_size_t dpa, void *iobuf, u64 len, int rw)
-{
-       struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
-       struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
-       struct nd_region *nd_region = nfit_blk->nd_region;
-       unsigned int lane, copied = 0;
-       int rc = 0;
-
-       lane = nd_region_acquire_lane(nd_region);
-       while (len) {
-               u64 c = min(len, mmio->size);
-
-               rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
-                               iobuf + copied, c, rw, lane);
-               if (rc)
-                       break;
-
-               copied += c;
-               len -= c;
-       }
-       nd_region_release_lane(nd_region, lane);
-
-       return rc;
-}
-
-static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
-               struct acpi_nfit_interleave *idt, u16 interleave_ways)
-{
-       if (idt) {
-               mmio->num_lines = idt->line_count;
-               mmio->line_size = idt->line_size;
-               if (interleave_ways == 0)
-                       return -ENXIO;
-               mmio->table_size = mmio->num_lines * interleave_ways
-                       * mmio->line_size;
-       }
-
-       return 0;
-}
-
-static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
-               struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
-{
-       struct nd_cmd_dimm_flags flags;
-       int rc;
-
-       memset(&flags, 0, sizeof(flags));
-       rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
-                       sizeof(flags), NULL);
-
-       if (rc >= 0 && flags.status == 0)
-               nfit_blk->dimm_flags = flags.flags;
-       else if (rc == -ENOTTY) {
-               /* fall back to a conservative default */
-               nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
-               rc = 0;
-       } else
-               rc = -ENXIO;
-
-       return rc;
-}
-
-static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
-               struct device *dev)
-{
-       struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
-       struct nd_blk_region *ndbr = to_nd_blk_region(dev);
-       struct nfit_blk_mmio *mmio;
-       struct nfit_blk *nfit_blk;
-       struct nfit_mem *nfit_mem;
-       struct nvdimm *nvdimm;
-       int rc;
-
-       nvdimm = nd_blk_region_to_dimm(ndbr);
-       nfit_mem = nvdimm_provider_data(nvdimm);
-       if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
-               dev_dbg(dev, "missing%s%s%s\n",
-                               nfit_mem ? "" : " nfit_mem",
-                               (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
-                               (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
-               return -ENXIO;
-       }
-
-       nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
-       if (!nfit_blk)
-               return -ENOMEM;
-       nd_blk_region_set_provider_data(ndbr, nfit_blk);
-       nfit_blk->nd_region = to_nd_region(dev);
-
-       /* map block aperture memory */
-       nfit_blk->bdw_offset = nfit_mem->bdw->offset;
-       mmio = &nfit_blk->mmio[BDW];
-       mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
-                       nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
-       if (!mmio->addr.base) {
-               dev_dbg(dev, "%s failed to map bdw\n",
-                               nvdimm_name(nvdimm));
-               return -ENOMEM;
-       }
-       mmio->size = nfit_mem->bdw->size;
-       mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
-       mmio->idt = nfit_mem->idt_bdw;
-       mmio->spa = nfit_mem->spa_bdw;
-       rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
-                       nfit_mem->memdev_bdw->interleave_ways);
-       if (rc) {
-               dev_dbg(dev, "%s failed to init bdw interleave\n",
-                               nvdimm_name(nvdimm));
-               return rc;
-       }
-
-       /* map block control memory */
-       nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
-       nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
-       mmio = &nfit_blk->mmio[DCR];
-       mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
-                       nfit_mem->spa_dcr->length);
-       if (!mmio->addr.base) {
-               dev_dbg(dev, "%s failed to map dcr\n",
-                               nvdimm_name(nvdimm));
-               return -ENOMEM;
-       }
-       mmio->size = nfit_mem->dcr->window_size;
-       mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
-       mmio->idt = nfit_mem->idt_dcr;
-       mmio->spa = nfit_mem->spa_dcr;
-       rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
-                       nfit_mem->memdev_dcr->interleave_ways);
-       if (rc) {
-               dev_dbg(dev, "%s failed to init dcr interleave\n",
-                               nvdimm_name(nvdimm));
-               return rc;
-       }
-
-       rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
-       if (rc < 0) {
-               dev_dbg(dev, "%s failed get DIMM flags\n",
-                               nvdimm_name(nvdimm));
-               return rc;
-       }
-
-       if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
-               dev_warn(dev, "unable to guarantee persistence of writes\n");
-
-       if (mmio->line_size == 0)
-               return 0;
-
-       if ((u32) nfit_blk->cmd_offset % mmio->line_size
-                       + 8 > mmio->line_size) {
-               dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
-               return -ENXIO;
-       } else if ((u32) nfit_blk->stat_offset % mmio->line_size
-                       + 8 > mmio->line_size) {
-               dev_dbg(dev, "stat_offset crosses interleave boundary\n");
-               return -ENXIO;
-       }
-
-       return 0;
-}
-
 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
                struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
 {
@@ -2911,9 +2559,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
        struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
                        memdev->device_handle);
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
-       struct nd_blk_region_desc *ndbr_desc;
-       struct nfit_mem *nfit_mem;
-       int rc;
 
        if (!nvdimm) {
                dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
@@ -2928,30 +2573,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
                mapping->start = memdev->address;
                mapping->size = memdev->region_size;
                break;
-       case NFIT_SPA_DCR:
-               nfit_mem = nvdimm_provider_data(nvdimm);
-               if (!nfit_mem || !nfit_mem->bdw) {
-                       dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
-                                       spa->range_index, nvdimm_name(nvdimm));
-                       break;
-               }
-
-               mapping->size = nfit_mem->bdw->capacity;
-               mapping->start = nfit_mem->bdw->start_address;
-               ndr_desc->num_lanes = nfit_mem->bdw->windows;
-               ndr_desc->mapping = mapping;
-               ndr_desc->num_mappings = 1;
-               ndbr_desc = to_blk_region_desc(ndr_desc);
-               ndbr_desc->enable = acpi_nfit_blk_region_enable;
-               ndbr_desc->do_io = acpi_desc->blk_do_io;
-               rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
-               if (rc)
-                       return rc;
-               nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
-                               ndr_desc);
-               if (!nfit_spa->nd_region)
-                       return -ENOMEM;
-               break;
        }
 
        return 0;
@@ -2977,8 +2598,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 {
        static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
-       struct nd_blk_region_desc ndbr_desc;
-       struct nd_region_desc *ndr_desc;
+       struct nd_region_desc *ndr_desc, _ndr_desc;
        struct nfit_memdev *nfit_memdev;
        struct nvdimm_bus *nvdimm_bus;
        struct resource res;
@@ -2994,10 +2614,10 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 
        memset(&res, 0, sizeof(res));
        memset(&mappings, 0, sizeof(mappings));
-       memset(&ndbr_desc, 0, sizeof(ndbr_desc));
+       memset(&_ndr_desc, 0, sizeof(_ndr_desc));
        res.start = spa->address;
        res.end = res.start + spa->length - 1;
-       ndr_desc = &ndbr_desc.ndr_desc;
+       ndr_desc = &_ndr_desc;
        ndr_desc->res = &res;
        ndr_desc->provider_data = nfit_spa;
        ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
@@ -3635,7 +3255,6 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
 
        dev_set_drvdata(dev, acpi_desc);
        acpi_desc->dev = dev;
-       acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
        nd_desc = &acpi_desc->nd_desc;
        nd_desc->provider_name = "ACPI.NFIT";
        nd_desc->module = THIS_MODULE;
index c674f3d..50882bd 100644 (file)
@@ -208,13 +208,9 @@ struct nfit_mem {
        struct nvdimm *nvdimm;
        struct acpi_nfit_memory_map *memdev_dcr;
        struct acpi_nfit_memory_map *memdev_pmem;
-       struct acpi_nfit_memory_map *memdev_bdw;
        struct acpi_nfit_control_region *dcr;
-       struct acpi_nfit_data_region *bdw;
        struct acpi_nfit_system_address *spa_dcr;
-       struct acpi_nfit_system_address *spa_bdw;
        struct acpi_nfit_interleave *idt_dcr;
-       struct acpi_nfit_interleave *idt_bdw;
        struct kernfs_node *flags_attr;
        struct nfit_flush *nfit_flush;
        struct list_head list;
@@ -266,8 +262,6 @@ struct acpi_nfit_desc {
        unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1];
        unsigned int platform_cap;
        unsigned int scrub_tmo;
-       int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
-                       void *iobuf, u64 len, int rw);
        enum nvdimm_fwa_state fwa_state;
        enum nvdimm_fwa_capability fwa_cap;
        int fwa_count;
index ceee808..47ec11d 100644 (file)
@@ -151,8 +151,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic_override *p =
                            (struct acpi_madt_local_apic_override *)header;
-                       pr_info("LAPIC_ADDR_OVR (address[%p])\n",
-                               (void *)(unsigned long)p->address);
+                       pr_info("LAPIC_ADDR_OVR (address[0x%llx])\n",
+                               p->address);
                }
                break;
 
index 2578b2d..e465108 100644 (file)
@@ -1,10 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *  console driver for LCD2S 4x20 character displays connected through i2c.
- *  The display also has a spi interface, but the driver does not support
+ *  Console driver for LCD2S 4x20 character displays connected through i2c.
+ *  The display also has a SPI interface, but the driver does not support
  *  this yet.
  *
- *  This is a driver allowing you to use a LCD2S 4x20 from modtronix
+ *  This is a driver allowing you to use a LCD2S 4x20 from Modtronix
  *  engineering as auxdisplay character device.
  *
  *  (C) 2019 by Lemonage Software GmbH
@@ -12,7 +12,9 @@
  *  All rights reserved.
  */
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
@@ -104,7 +106,7 @@ static int lcd2s_print(struct charlcd *lcd, int c)
 static int lcd2s_gotoxy(struct charlcd *lcd, unsigned int x, unsigned int y)
 {
        struct lcd2s_data *lcd2s = lcd->drvdata;
-       u8 buf[] = { LCD2S_CMD_CUR_POS, y + 1, x + 1};
+       u8 buf[3] = { LCD2S_CMD_CUR_POS, y + 1, x + 1 };
 
        lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf));
 
@@ -214,16 +216,15 @@ static int lcd2s_lines(struct charlcd *lcd, enum charlcd_lines lines)
        return 0;
 }
 
+/*
+ * Generator: LGcxxxxx...xx; must have <c> between '0' and '7',
+ * representing the numerical ASCII code of the redefined character,
+ * and <xx...xx> a sequence of 16 hex digits representing 8 bytes
+ * for each character. Most LCDs will only use 5 lower bits of
+ * the 7 first bytes.
+ */
 static int lcd2s_redefine_char(struct charlcd *lcd, char *esc)
 {
-       /* Generator : LGcxxxxx...xx; must have <c> between '0'
-        * and '7', representing the numerical ASCII code of the
-        * redefined character, and <xx...xx> a sequence of 16
-        * hex digits representing 8 bytes for each character.
-        * Most LCDs will only use 5 lower bits of the 7 first
-        * bytes.
-        */
-
        struct lcd2s_data *lcd2s = lcd->drvdata;
        u8 buf[LCD2S_CHARACTER_SIZE + 2] = { LCD2S_CMD_DEF_CUSTOM_CHAR };
        u8 value;
@@ -286,8 +287,7 @@ static const struct charlcd_ops lcd2s_ops = {
        .redefine_char  = lcd2s_redefine_char,
 };
 
-static int lcd2s_i2c_probe(struct i2c_client *i2c,
-                               const struct i2c_device_id *id)
+static int lcd2s_i2c_probe(struct i2c_client *i2c)
 {
        struct charlcd *lcd;
        struct lcd2s_data *lcd2s;
@@ -355,43 +355,22 @@ static const struct i2c_device_id lcd2s_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, lcd2s_i2c_id);
 
-#ifdef CONFIG_OF
 static const struct of_device_id lcd2s_of_table[] = {
        { .compatible = "modtronix,lcd2s" },
        { }
 };
 MODULE_DEVICE_TABLE(of, lcd2s_of_table);
-#endif
 
 static struct i2c_driver lcd2s_i2c_driver = {
        .driver = {
                .name = "lcd2s",
-#ifdef CONFIG_OF
-               .of_match_table = of_match_ptr(lcd2s_of_table),
-#endif
+               .of_match_table = lcd2s_of_table,
        },
-       .probe = lcd2s_i2c_probe,
+       .probe_new = lcd2s_i2c_probe,
        .remove = lcd2s_i2c_remove,
        .id_table = lcd2s_i2c_id,
 };
-
-static int __init lcd2s_modinit(void)
-{
-       int ret = 0;
-
-       ret = i2c_add_driver(&lcd2s_i2c_driver);
-       if (ret != 0)
-               pr_err("Failed to register lcd2s driver\n");
-
-       return ret;
-}
-module_init(lcd2s_modinit)
-
-static void __exit lcd2s_exit(void)
-{
-       i2c_del_driver(&lcd2s_i2c_driver);
-}
-module_exit(lcd2s_exit)
+module_i2c_driver(lcd2s_i2c_driver);
 
 MODULE_DESCRIPTION("LCD2S character display driver");
 MODULE_AUTHOR("Lars Poeschel");
index 7408118..55f4837 100644 (file)
@@ -449,6 +449,7 @@ config RANDOM_TRUST_BOOTLOADER
        device randomness. Say Y here to assume the entropy provided by the
        booloader is trustworthy so it will be added to the kernel's entropy
        pool. Otherwise, say N here so it will be regarded as device input that
-       only mixes the entropy pool.
+       only mixes the entropy pool. This can also be configured at boot with
+       "random.trust_bootloader=on/off".
 
 endmenu
index 66ce7c0..1d82429 100644 (file)
@@ -224,9 +224,10 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void
  *
  * These interfaces will return the requested number of random bytes
  * into the given buffer or as a return value. This is equivalent to
- * a read from /dev/urandom. The integer family of functions may be
- * higher performance for one-off random integers, because they do a
- * bit of buffering.
+ * a read from /dev/urandom. The u32, u64, int, and long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
  *
  *********************************************************************/
 
@@ -948,11 +949,17 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
  **********************************************************************/
 
 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
 static int __init parse_trust_cpu(char *arg)
 {
        return kstrtobool(arg, &trust_cpu);
 }
+static int __init parse_trust_bootloader(char *arg)
+{
+       return kstrtobool(arg, &trust_bootloader);
+}
 early_param("random.trust_cpu", parse_trust_cpu);
+early_param("random.trust_bootloader", parse_trust_bootloader);
 
 /*
  * The first collection of entropy occurs at system boot while interrupts
@@ -968,6 +975,11 @@ int __init rand_initialize(void)
        bool arch_init = true;
        unsigned long rv;
 
+#if defined(LATENT_ENTROPY_PLUGIN)
+       static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+       _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
+#endif
+
        for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
                if (!arch_get_random_seed_long_early(&rv) &&
                    !arch_get_random_long_early(&rv)) {
@@ -1128,7 +1140,7 @@ void rand_initialize_disk(struct gendisk *disk)
 void add_hwgenerator_randomness(const void *buffer, size_t count,
                                size_t entropy)
 {
-       if (unlikely(crng_init == 0)) {
+       if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
                size_t ret = crng_pre_init_inject(buffer, count, true);
                mix_pool_bytes(buffer, ret);
                count -= ret;
@@ -1160,7 +1172,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
  */
 void add_bootloader_randomness(const void *buf, size_t size)
 {
-       if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+       if (trust_bootloader)
                add_hwgenerator_randomness(buf, size, size * 8);
        else
                add_device_randomness(buf, size);
diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
new file mode 100644 (file)
index 0000000..cdbc7d7
--- /dev/null
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_COMMON_CLK=y
+CONFIG_CLK_KUNIT_TEST=y
+CONFIG_CLK_GATE_KUNIT_TEST=y
index d4d67fb..5d596e7 100644 (file)
@@ -59,6 +59,15 @@ config LMK04832
          Say yes here to build support for Texas Instruments' LMK04832 Ultra
          Low-Noise JESD204B Compliant Clock Jitter Cleaner With Dual Loop PLLs
 
+config COMMON_CLK_APPLE_NCO
+       tristate "Clock driver for Apple SoC NCOs"
+       depends on ARCH_APPLE || COMPILE_TEST
+       default ARCH_APPLE
+       help
+         This driver supports NCO (Numerically Controlled Oscillator) blocks
+         found on Apple SoCs such as t8103 (M1). The blocks are typically
+         generators of audio clocks.
+
 config COMMON_CLK_MAX77686
        tristate "Clock driver for Maxim 77620/77686/77802 MFD"
        depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST
@@ -197,6 +206,7 @@ config COMMON_CLK_CDCE925
 config COMMON_CLK_CS2000_CP
        tristate "Clock driver for CS2000 Fractional-N Clock Synthesizer & Clock Multiplier"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for the CS2000 clock multiplier.
 
@@ -233,6 +243,7 @@ config COMMON_CLK_LAN966X
        bool "Generic Clock Controller driver for LAN966X SoC"
        depends on HAS_IOMEM
        depends on OF
+       depends on SOC_LAN966 || COMPILE_TEST
        help
          This driver provides support for Generic Clock Controller(GCK) on
          LAN966X SoC. GCK generates and supplies clock to various peripherals
@@ -332,9 +343,6 @@ config COMMON_CLK_PXA
        help
          Support for the Marvell PXA SoC.
 
-config COMMON_CLK_PIC32
-       def_bool COMMON_CLK && MACH_PIC32
-
 config COMMON_CLK_OXNAS
        bool "Clock driver for the OXNAS SoC Family"
        depends on ARCH_OXNAS || COMPILE_TEST
@@ -342,6 +350,15 @@ config COMMON_CLK_OXNAS
        help
          Support for the OXNAS SoC Family clocks.
 
+config COMMON_CLK_RS9_PCIE
+       tristate "Clock driver for Renesas 9-series PCIe clock generators"
+       depends on I2C
+       depends on OF
+       select REGMAP_I2C
+       help
+         This driver supports the Renesas 9-series PCIe clock generator
+         models 9FGV/9DBV/9DMV/9FGL/9DML/9QXL/9SQ.
+
 config COMMON_CLK_VC5
        tristate "Clock driver for IDT VersaClock 5,6 devices"
        depends on I2C
@@ -409,6 +426,7 @@ source "drivers/clk/keystone/Kconfig"
 source "drivers/clk/mediatek/Kconfig"
 source "drivers/clk/meson/Kconfig"
 source "drivers/clk/mstar/Kconfig"
+source "drivers/clk/microchip/Kconfig"
 source "drivers/clk/mvebu/Kconfig"
 source "drivers/clk/pistachio/Kconfig"
 source "drivers/clk/qcom/Kconfig"
@@ -430,4 +448,19 @@ source "drivers/clk/x86/Kconfig"
 source "drivers/clk/xilinx/Kconfig"
 source "drivers/clk/zynqmp/Kconfig"
 
+# Kunit test cases
+config CLK_KUNIT_TEST
+       tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS
+       depends on KUNIT
+       default KUNIT_ALL_TESTS
+       help
+         Kunit tests for the common clock framework.
+
+config CLK_GATE_KUNIT_TEST
+       tristate "Basic gate type Kunit test" if !KUNIT_ALL_TESTS
+       depends on KUNIT
+       default KUNIT_ALL_TESTS
+       help
+         Kunit test for the basic clk gate type.
+
 endif
index 16e5886..2bd5ffd 100644 (file)
@@ -2,10 +2,12 @@
 # common clock types
 obj-$(CONFIG_HAVE_CLK)         += clk-devres.o clk-bulk.o clkdev.o
 obj-$(CONFIG_COMMON_CLK)       += clk.o
+obj-$(CONFIG_CLK_KUNIT_TEST)   += clk_test.o
 obj-$(CONFIG_COMMON_CLK)       += clk-divider.o
 obj-$(CONFIG_COMMON_CLK)       += clk-fixed-factor.o
 obj-$(CONFIG_COMMON_CLK)       += clk-fixed-rate.o
 obj-$(CONFIG_COMMON_CLK)       += clk-gate.o
+obj-$(CONFIG_CLK_GATE_KUNIT_TEST) += clk-gate_test.o
 obj-$(CONFIG_COMMON_CLK)       += clk-multiplier.o
 obj-$(CONFIG_COMMON_CLK)       += clk-mux.o
 obj-$(CONFIG_COMMON_CLK)       += clk-composite.o
@@ -17,6 +19,7 @@ endif
 
 # hardware specific clock types
 # please keep this section sorted lexicographically by file path name
+obj-$(CONFIG_COMMON_CLK_APPLE_NCO)     += clk-apple-nco.o
 obj-$(CONFIG_MACH_ASM9260)             += clk-asm9260.o
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)    += clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)               += clk-axm5516.o
@@ -67,6 +70,7 @@ obj-$(CONFIG_COMMON_CLK_STM32MP157)   += clk-stm32mp1.o
 obj-$(CONFIG_COMMON_CLK_TPS68470)      += clk-tps68470.o
 obj-$(CONFIG_CLK_TWL6040)              += clk-twl6040.o
 obj-$(CONFIG_ARCH_VT8500)              += clk-vt8500.o
+obj-$(CONFIG_COMMON_CLK_RS9_PCIE)      += clk-renesas-pcie.o
 obj-$(CONFIG_COMMON_CLK_VC5)           += clk-versaclock5.o
 obj-$(CONFIG_COMMON_CLK_WM831X)                += clk-wm831x.o
 obj-$(CONFIG_COMMON_CLK_XGENE)         += clk-xgene.o
@@ -91,7 +95,7 @@ obj-$(CONFIG_ARCH_KEYSTONE)           += keystone/
 obj-$(CONFIG_MACH_LOONGSON32)          += loongson1/
 obj-y                                  += mediatek/
 obj-$(CONFIG_ARCH_MESON)               += meson/
-obj-$(CONFIG_MACH_PIC32)               += microchip/
+obj-y                                  += microchip/
 ifeq ($(CONFIG_COMMON_CLK), y)
 obj-$(CONFIG_ARCH_MMP)                 += mmp/
 endif
index 57d06e1..c69a7e2 100644 (file)
@@ -95,7 +95,7 @@
 
 static const struct clk_pll_table clk_audio_pll_table[] = {
        { 0, 45158400 }, { 1, 49152000 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 /* pll clocks */
@@ -138,46 +138,46 @@ static struct clk_factor_table sd_factor_table[] = {
        { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
        { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
        { 280, 1, 25 * 128 },
-       { 0, 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_factor_table de_factor_table[] = {
        { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
        { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
        { 8, 1, 12 },
-       { 0, 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_factor_table hde_factor_table[] = {
        { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
        { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
-       { 0, 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table rmii_ref_div_table[] = {
        { 0, 4 }, { 1, 10 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table std12rate_div_table[] = {
        { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
        { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
        { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table i2s_div_table[] = {
        { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
        { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
        { 8, 24 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table nand_div_table[] = {
        { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 6 },
        { 4, 8 }, { 5, 10 }, { 6, 12 }, { 7, 14 },
        { 8, 16 }, { 9, 18 }, { 10, 20 }, { 11, 22 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 /* mux clock */
index a2f34d1..3e48105 100644 (file)
@@ -73,7 +73,7 @@
 
 static struct clk_pll_table clk_audio_pll_table[] = {
        {0, 45158400}, {1, 49152000},
-       {0, 0},
+       { /* sentinel */ }
 };
 
 static struct clk_pll_table clk_cvbs_pll_table[] = {
@@ -82,7 +82,8 @@ static struct clk_pll_table clk_cvbs_pll_table[] = {
        {33, 35 * 12000000}, {34, 36 * 12000000}, {35, 37 * 12000000},
        {36, 38 * 12000000}, {37, 39 * 12000000}, {38, 40 * 12000000},
        {39, 41 * 12000000}, {40, 42 * 12000000}, {41, 43 * 12000000},
-       {42, 44 * 12000000}, {43, 45 * 12000000}, {0, 0},
+       {42, 44 * 12000000}, {43, 45 * 12000000},
+       { /* sentinel */ }
 };
 
 /* pll clocks */
@@ -137,7 +138,7 @@ static struct clk_factor_table sd_factor_table[] = {
        {276, 1, 21 * 128}, {277, 1, 22 * 128}, {278, 1, 23 * 128}, {279, 1, 24 * 128},
        {280, 1, 25 * 128}, {281, 1, 26 * 128},
 
-       {0, 0},
+       { /* sentinel */ }
 };
 
 static struct clk_factor_table lcd_factor_table[] = {
@@ -150,18 +151,19 @@ static struct clk_factor_table lcd_factor_table[] = {
        {256, 1, 1 * 7}, {257, 1, 2 * 7}, {258, 1, 3 * 7}, {259, 1, 4 * 7},
        {260, 1, 5 * 7}, {261, 1, 6 * 7}, {262, 1, 7 * 7}, {263, 1, 8 * 7},
        {264, 1, 9 * 7}, {265, 1, 10 * 7}, {266, 1, 11 * 7}, {267, 1, 12 * 7},
-       {0, 0},
+       { /* sentinel */ }
 };
 
 static struct clk_div_table hdmia_div_table[] = {
        {0, 1},   {1, 2},   {2, 3},   {3, 4},
        {4, 6},   {5, 8},   {6, 12},  {7, 16},
        {8, 24},
-       {0, 0},
+       { /* sentinel */ }
 };
 
 static struct clk_div_table rmii_div_table[] = {
        {0, 4},   {1, 10},
+       { /* sentinel */ }
 };
 
 /* divider clocks */
@@ -178,13 +180,14 @@ static OWL_DIVIDER(clk_rmii_ref, "rmii_ref", "ethernet_pll", CMU_ETHERNETPLL, 2,
 static struct clk_factor_table de_factor_table[] = {
        {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
        {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
-       {8, 1, 12}, {0, 0, 0},
+       {8, 1, 12},
+       { /* sentinel */ }
 };
 
 static struct clk_factor_table hde_factor_table[] = {
        {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
        {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
-       {0, 0, 0},
+       { /* sentinel */ }
 };
 
 /* gate clocks */
index 7908909..7dc6e07 100644 (file)
 
 static struct clk_pll_table clk_audio_pll_table[] = {
        { 0, 45158400 }, { 1, 49152000 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_pll_table clk_edp_pll_table[] = {
        { 0, 810000000 }, { 1, 135000000 }, { 2, 270000000 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 /* pll clocks */
@@ -120,41 +120,41 @@ static struct clk_div_table nand_div_table[] = {
        { 4, 8 }, { 5, 10 }, { 6, 12 }, { 7, 14 },
        { 8, 16 }, { 9, 18 }, { 10, 20 }, { 11, 22 },
        { 12, 24 }, { 13, 26 }, { 14, 28 }, { 15, 30 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table apb_div_table[] = {
        { 1, 2 }, { 2, 3 }, { 3, 4 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table eth_mac_div_table[] = {
        { 0, 2 }, { 1, 4 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table rmii_ref_div_table[] = {
        { 0, 4 },         { 1, 10 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table usb3_mac_div_table[] = {
        { 1, 2 }, { 2, 3 }, { 3, 4 },
-       { 0, 8 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table i2s_div_table[] = {
        { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
        { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
        { 8, 24 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_div_table hdmia_div_table[] = {
        { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
        { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
        { 8, 24 },
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 /* divider clocks */
@@ -185,24 +185,24 @@ static struct clk_factor_table sd_factor_table[] = {
        { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
        { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
 
-       { 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_factor_table dmm_factor_table[] = {
        { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 1, 3 },
        { 4, 1, 4 },
-       { 0, 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_factor_table noc_factor_table[] = {
        { 0, 1, 1 },   { 1, 2, 3 }, { 2, 1, 2 }, { 3, 1, 3 }, { 4, 1, 4 },
-       { 0, 0, 0 },
+       { /* sentinel */ }
 };
 
 static struct clk_factor_table bisp_factor_table[] = {
        { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
        { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
-       { 0, 0, 0 },
+       { /* sentinel */ }
 };
 
 /* factor clocks */
index fff4fdd..b174f72 100644 (file)
@@ -143,8 +143,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
                                           parent_names,
                                           &at91rm9200_master_layout,
                                           &rm9200_mck_characteristics,
-                                          &rm9200_mck_lock, CLK_SET_RATE_GATE,
-                                          INT_MIN);
+                                          &rm9200_mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index 79802f8..11550e5 100644 (file)
@@ -419,8 +419,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
                                           parent_names,
                                           &at91rm9200_master_layout,
                                           data->mck_characteristics,
-                                          &at91sam9260_mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &at91sam9260_mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index 7ed984f..8c93444 100644 (file)
@@ -154,8 +154,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
                                           parent_names,
                                           &at91rm9200_master_layout,
                                           &mck_characteristics,
-                                          &at91sam9g45_mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &at91sam9g45_mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index 63cc589..0bb1940 100644 (file)
@@ -181,8 +181,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
                                           parent_names,
                                           &at91sam9x5_master_layout,
                                           &mck_characteristics,
-                                          &at91sam9n12_mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &at91sam9n12_mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index 4d4faf6..b992137 100644 (file)
@@ -123,8 +123,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
                                           parent_names,
                                           &at91rm9200_master_layout,
                                           &sam9rl_mck_characteristics,
-                                          &sam9rl_mck_lock, CLK_SET_RATE_GATE,
-                                          INT_MIN);
+                                          &sam9rl_mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index bd8007b..3857db2 100644 (file)
@@ -201,8 +201,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
        hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
                                           parent_names,
                                           &at91sam9x5_master_layout,
-                                          &mck_characteristics, &mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &mck_characteristics, &mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index b2d0a7f..164e295 100644 (file)
@@ -374,85 +374,6 @@ static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
        }
 }
 
-static int clk_master_pres_determine_rate(struct clk_hw *hw,
-                                         struct clk_rate_request *req)
-{
-       struct clk_master *master = to_clk_master(hw);
-       struct clk_rate_request req_parent = *req;
-       const struct clk_master_characteristics *characteristics =
-                                                       master->characteristics;
-       struct clk_hw *parent;
-       long best_rate = LONG_MIN, best_diff = LONG_MIN;
-       u32 pres;
-       int i;
-
-       if (master->chg_pid < 0)
-               return -EOPNOTSUPP;
-
-       parent = clk_hw_get_parent_by_index(hw, master->chg_pid);
-       if (!parent)
-               return -EOPNOTSUPP;
-
-       for (i = 0; i <= MASTER_PRES_MAX; i++) {
-               if (characteristics->have_div3_pres && i == MASTER_PRES_MAX)
-                       pres = 3;
-               else
-                       pres = 1 << i;
-
-               req_parent.rate = req->rate * pres;
-               if (__clk_determine_rate(parent, &req_parent))
-                       continue;
-
-               clk_sama7g5_master_best_diff(req, parent, req_parent.rate,
-                                            &best_diff, &best_rate, pres);
-               if (!best_diff)
-                       break;
-       }
-
-       return 0;
-}
-
-static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate,
-                                   unsigned long parent_rate)
-{
-       struct clk_master *master = to_clk_master(hw);
-       unsigned long flags;
-       unsigned int pres, mckr, tmp;
-       int ret;
-
-       pres = DIV_ROUND_CLOSEST(parent_rate, rate);
-       if (pres > MASTER_PRES_MAX)
-               return -EINVAL;
-
-       else if (pres == 3)
-               pres = MASTER_PRES_MAX;
-       else if (pres)
-               pres = ffs(pres) - 1;
-
-       spin_lock_irqsave(master->lock, flags);
-       ret = regmap_read(master->regmap, master->layout->offset, &mckr);
-       if (ret)
-               goto unlock;
-
-       mckr &= master->layout->mask;
-       tmp = (mckr >> master->layout->pres_shift) & MASTER_PRES_MASK;
-       if (pres == tmp)
-               goto unlock;
-
-       mckr &= ~(MASTER_PRES_MASK << master->layout->pres_shift);
-       mckr |= (pres << master->layout->pres_shift);
-       ret = regmap_write(master->regmap, master->layout->offset, mckr);
-       if (ret)
-               goto unlock;
-
-       while (!clk_master_ready(master))
-               cpu_relax();
-unlock:
-       spin_unlock_irqrestore(master->lock, flags);
-
-       return ret;
-}
-
 static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
                                                 unsigned long parent_rate)
 {
@@ -539,13 +460,6 @@ static void clk_master_pres_restore_context(struct clk_hw *hw)
                pr_warn("MCKR PRES was not configured properly by firmware!\n");
 }
 
-static void clk_master_pres_restore_context_chg(struct clk_hw *hw)
-{
-       struct clk_master *master = to_clk_master(hw);
-
-       clk_master_pres_set_rate(hw, master->pms.rate, master->pms.parent_rate);
-}
-
 static const struct clk_ops master_pres_ops = {
        .prepare = clk_master_prepare,
        .is_prepared = clk_master_is_prepared,
@@ -555,25 +469,13 @@ static const struct clk_ops master_pres_ops = {
        .restore_context = clk_master_pres_restore_context,
 };
 
-static const struct clk_ops master_pres_ops_chg = {
-       .prepare = clk_master_prepare,
-       .is_prepared = clk_master_is_prepared,
-       .determine_rate = clk_master_pres_determine_rate,
-       .recalc_rate = clk_master_pres_recalc_rate,
-       .get_parent = clk_master_pres_get_parent,
-       .set_rate = clk_master_pres_set_rate,
-       .save_context = clk_master_pres_save_context,
-       .restore_context = clk_master_pres_restore_context_chg,
-};
-
 static struct clk_hw * __init
 at91_clk_register_master_internal(struct regmap *regmap,
                const char *name, int num_parents,
                const char **parent_names,
                const struct clk_master_layout *layout,
                const struct clk_master_characteristics *characteristics,
-               const struct clk_ops *ops, spinlock_t *lock, u32 flags,
-               int chg_pid)
+               const struct clk_ops *ops, spinlock_t *lock, u32 flags)
 {
        struct clk_master *master;
        struct clk_init_data init;
@@ -599,7 +501,6 @@ at91_clk_register_master_internal(struct regmap *regmap,
        master->layout = layout;
        master->characteristics = characteristics;
        master->regmap = regmap;
-       master->chg_pid = chg_pid;
        master->lock = lock;
 
        if (ops == &master_div_ops_chg) {
@@ -628,19 +529,13 @@ at91_clk_register_master_pres(struct regmap *regmap,
                const char **parent_names,
                const struct clk_master_layout *layout,
                const struct clk_master_characteristics *characteristics,
-               spinlock_t *lock, u32 flags, int chg_pid)
+               spinlock_t *lock)
 {
-       const struct clk_ops *ops;
-
-       if (flags & CLK_SET_RATE_GATE)
-               ops = &master_pres_ops;
-       else
-               ops = &master_pres_ops_chg;
-
        return at91_clk_register_master_internal(regmap, name, num_parents,
                                                 parent_names, layout,
-                                                characteristics, ops,
-                                                lock, flags, chg_pid);
+                                                characteristics,
+                                                &master_pres_ops,
+                                                lock, CLK_SET_RATE_GATE);
 }
 
 struct clk_hw * __init
@@ -661,7 +556,7 @@ at91_clk_register_master_div(struct regmap *regmap,
        hw = at91_clk_register_master_internal(regmap, name, 1,
                                               &parent_name, layout,
                                               characteristics, ops,
-                                              lock, flags, -EINVAL);
+                                              lock, flags);
 
        if (!IS_ERR(hw) && safe_div) {
                master_div = to_clk_master(hw);
index ca2dbb6..8ca8bca 100644 (file)
@@ -392,8 +392,7 @@ of_at91_clk_master_setup(struct device_node *np,
 
        hw = at91_clk_register_master_pres(regmap, "masterck_pres", num_parents,
                                           parent_names, layout,
-                                          characteristics, &mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          characteristics, &mck_lock);
        if (IS_ERR(hw))
                goto out_free_characteristics;
 
index 3a1bf61..efe4975 100644 (file)
@@ -175,7 +175,7 @@ at91_clk_register_master_pres(struct regmap *regmap, const char *name,
                              int num_parents, const char **parent_names,
                              const struct clk_master_layout *layout,
                              const struct clk_master_characteristics *characteristics,
-                             spinlock_t *lock, u32 flags, int chg_pid);
+                             spinlock_t *lock);
 
 struct clk_hw * __init
 at91_clk_register_master_div(struct regmap *regmap, const char *name,
index 5c26418..9ea4ce5 100644 (file)
@@ -271,8 +271,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
        parent_names[2] = "pllack_divck";
        hw = at91_clk_register_master_pres(regmap, "masterck_pres", 3,
                                           parent_names, &sam9x60_master_layout,
-                                          &mck_characteristics, &mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &mck_characteristics, &mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index d027294..cfd0f5e 100644 (file)
@@ -168,7 +168,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
        if (IS_ERR(regmap))
                return;
 
-       sama5d2_pmc = pmc_data_allocate(PMC_AUDIOPLLCK + 1,
+       sama5d2_pmc = pmc_data_allocate(PMC_AUDIOPINCK + 1,
                                        nck(sama5d2_systemck),
                                        nck(sama5d2_periph32ck),
                                        nck(sama5d2_gck), 3);
@@ -216,6 +216,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
        if (IS_ERR(hw))
                goto err_free;
 
+       sama5d2_pmc->chws[PMC_AUDIOPINCK] = hw;
+
        hw = at91_clk_register_audio_pll_pmc(regmap, "audiopll_pmcck",
                                             "audiopll_fracck");
        if (IS_ERR(hw))
@@ -240,8 +242,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
        hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
                                           parent_names,
                                           &at91sam9x5_master_layout,
-                                          &mck_characteristics, &mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &mck_characteristics, &mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index 339d0f3..7e93c6e 100644 (file)
@@ -175,8 +175,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
        hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
                                           parent_names,
                                           &at91sam9x5_master_layout,
-                                          &mck_characteristics, &mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &mck_characteristics, &mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index 4af75b1..1a14a9b 100644 (file)
@@ -190,8 +190,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
        hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
                                           parent_names,
                                           &at91sam9x5_master_layout,
-                                          &mck_characteristics, &mck_lock,
-                                          CLK_SET_RATE_GATE, INT_MIN);
+                                          &mck_characteristics, &mck_lock);
        if (IS_ERR(hw))
                goto err_free;
 
index 369dfaf..9a213ba 100644 (file)
@@ -302,6 +302,7 @@ static const struct {
  * @ep_count:          extra parents count
  * @ep_mux_table:      mux table for extra parents
  * @id:                        clock id
+ * @eid:               export index in sama7g5->chws[] array
  * @c:                 true if clock is critical and cannot be disabled
  */
 static const struct {
@@ -311,6 +312,7 @@ static const struct {
        u8 ep_count;
        u8 ep_mux_table[4];
        u8 id;
+       u8 eid;
        u8 c;
 } sama7g5_mckx[] = {
        { .n = "mck1",
@@ -319,6 +321,7 @@ static const struct {
          .ep_mux_table = { 5, },
          .ep_count = 1,
          .ep_chg_id = INT_MIN,
+         .eid = PMC_MCK1,
          .c = 1, },
 
        { .n = "mck2",
@@ -696,16 +699,16 @@ static const struct {
        { .n  = "pdmc0_gclk",
          .id = 68,
          .r = { .max = 50000000  },
-         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
-         .pp_mux_table = { 5, 8, },
+         .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+         .pp_mux_table = { 5, 9, },
          .pp_count = 2,
          .pp_chg_id = INT_MIN, },
 
        { .n  = "pdmc1_gclk",
          .id = 69,
          .r = { .max = 50000000, },
-         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
-         .pp_mux_table = { 5, 8, },
+         .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+         .pp_mux_table = { 5, 9, },
          .pp_count = 2,
          .pp_chg_id = INT_MIN, },
 
@@ -913,7 +916,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
        if (IS_ERR(regmap))
                return;
 
-       sama7g5_pmc = pmc_data_allocate(PMC_CPU + 1,
+       sama7g5_pmc = pmc_data_allocate(PMC_MCK1 + 1,
                                        nck(sama7g5_systemck),
                                        nck(sama7g5_periphck),
                                        nck(sama7g5_gck), 8);
@@ -1027,6 +1030,9 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
                        goto err_free;
 
                alloc_mem[alloc_mem_size++] = mux_table;
+
+               if (sama7g5_mckx[i].eid)
+                       sama7g5_pmc->chws[sama7g5_mckx[i].eid] = hw;
        }
 
        hw = at91_clk_sama7g5_register_utmi(regmap, "utmick", "main_xtal");
index 2d65770..fdc9b66 100644 (file)
@@ -535,7 +535,7 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
 
        /*
         * Assume that if it has already been selected (for example by the
-        * bootloader), enough time has aready passed.
+        * bootloader), enough time has already passed.
         */
        if ((readl(osc->sckcr) & osc->bits->cr_oscsel)) {
                osc->prepared = true;
index f95959f..a3f349d 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * ARTPEC-6 clock initialization
  *
- * Copyright 2015-2016 Axis Comunications AB.
+ * Copyright 2015-2016 Axis Communications AB.
  */
 
 #include <linux/clk-provider.h>
index 3667b4d..3ad20e7 100644 (file)
@@ -939,10 +939,8 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
        u32 unused_frac_mask =
                GENMASK(CM_DIV_FRAC_BITS - data->frac_bits, 0) >> 1;
        u64 temp = (u64)parent_rate << CM_DIV_FRAC_BITS;
-       u64 rem;
        u32 div, mindiv, maxdiv;
 
-       rem = do_div(temp, rate);
        div = temp;
        div &= ~unused_frac_mask;
 
index a48ddd3..d7e5b94 100644 (file)
@@ -89,7 +89,7 @@
  * Parameters for VCO frequency configuration
  *
  * VCO frequency =
- * ((ndiv_int + ndiv_frac / 2^20) * (ref freqeuncy  / pdiv)
+ * ((ndiv_int + ndiv_frac / 2^20) * (ref frequency  / pdiv)
  */
 struct iproc_pll_vco_param {
        unsigned long rate;
index e65eeef..5dd6516 100644 (file)
@@ -510,7 +510,7 @@ static bool kona_clk_valid(struct kona_clk *bcm_clk)
  * placeholders for non-supported clocks.  Keep track of the
  * position of each clock name in the original array.
  *
- * Allocates an array of pointers to to hold the names of all
+ * Allocates an array of pointers to hold the names of all
  * non-null entries in the original array, and returns a pointer to
  * that array in *names.  This will be used for registering the
  * clock with the common clock code.  On successful return,
index dd3b71e..9d09621 100644 (file)
@@ -56,6 +56,8 @@ static char *rpi_firmware_clk_names[] = {
 #define RPI_FIRMWARE_STATE_ENABLE_BIT  BIT(0)
 #define RPI_FIRMWARE_STATE_WAIT_BIT    BIT(1)
 
+struct raspberrypi_clk_variant;
+
 struct raspberrypi_clk {
        struct device *dev;
        struct rpi_firmware *firmware;
@@ -66,10 +68,72 @@ struct raspberrypi_clk_data {
        struct clk_hw hw;
 
        unsigned int id;
+       struct raspberrypi_clk_variant *variant;
 
        struct raspberrypi_clk *rpi;
 };
 
+struct raspberrypi_clk_variant {
+       bool            export;
+       char            *clkdev;
+       unsigned long   min_rate;
+       bool            minimize;
+};
+
+static struct raspberrypi_clk_variant
+raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
+       [RPI_FIRMWARE_ARM_CLK_ID] = {
+               .export = true,
+               .clkdev = "cpu0",
+       },
+       [RPI_FIRMWARE_CORE_CLK_ID] = {
+               .export = true,
+
+               /*
+                * The clock is shared between the HVS and the CSI
+                * controllers, on the BCM2711 and will change depending
+                * on the pixels composited on the HVS and the capture
+                * resolution on Unicam.
+                *
+                * Since the rate can get quite large, and we need to
+                * coordinate between both driver instances, let's
+                * always use the minimum the drivers will let us.
+                */
+               .minimize = true,
+       },
+       [RPI_FIRMWARE_M2MC_CLK_ID] = {
+               .export = true,
+
+               /*
+                * If we boot without any cable connected to any of the
+                * HDMI connector, the firmware will skip the HSM
+                * initialization and leave it with a rate of 0,
+                * resulting in a bus lockup when we're accessing the
+                * registers even if it's enabled.
+                *
+                * Let's put a sensible default so that we don't end up
+                * in this situation.
+                */
+               .min_rate = 120000000,
+
+               /*
+                * The clock is shared between the two HDMI controllers
+                * on the BCM2711 and will change depending on the
+                * resolution output on each. Since the rate can get
+                * quite large, and we need to coordinate between both
+                * driver instances, let's always use the minimum the
+                * drivers will let us.
+                */
+               .minimize = true,
+       },
+       [RPI_FIRMWARE_V3D_CLK_ID] = {
+               .export = true,
+       },
+       [RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
+               .export = true,
+       },
+};
+
 /*
  * Structure of the message passed to Raspberry Pi's firmware in order to
  * change clock rates. The 'disable_turbo' option is only available to the ARM
@@ -165,12 +229,26 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
 static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
                                              struct clk_rate_request *req)
 {
+       struct raspberrypi_clk_data *data =
+               container_of(hw, struct raspberrypi_clk_data, hw);
+       struct raspberrypi_clk_variant *variant = data->variant;
+
        /*
         * The firmware will do the rounding but that isn't part of
         * the interface with the firmware, so we just do our best
         * here.
         */
+
        req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+
+       /*
+        * We want to aggressively reduce the clock rate here, so let's
+        * just ignore the requested rate and return the bare minimum
+        * rate we can get away with.
+        */
+       if (variant->minimize && req->min_rate > 0)
+               req->rate = req->min_rate;
+
        return 0;
 }
 
@@ -183,7 +261,8 @@ static const struct clk_ops raspberrypi_firmware_clk_ops = {
 
 static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
                                               unsigned int parent,
-                                              unsigned int id)
+                                              unsigned int id,
+                                              struct raspberrypi_clk_variant *variant)
 {
        struct raspberrypi_clk_data *data;
        struct clk_init_data init = {};
@@ -195,6 +274,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
                return ERR_PTR(-ENOMEM);
        data->rpi = rpi;
        data->id = id;
+       data->variant = variant;
 
        init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
                                   "fw-clk-%s",
@@ -228,15 +308,28 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
 
        clk_hw_set_rate_range(&data->hw, min_rate, max_rate);
 
-       if (id == RPI_FIRMWARE_ARM_CLK_ID) {
+       if (variant->clkdev) {
                ret = devm_clk_hw_register_clkdev(rpi->dev, &data->hw,
-                                                 NULL, "cpu0");
+                                                 NULL, variant->clkdev);
                if (ret) {
                        dev_err(rpi->dev, "Failed to initialize clkdev\n");
                        return ERR_PTR(ret);
                }
        }
 
+       if (variant->min_rate) {
+               unsigned long rate;
+
+               clk_hw_set_rate_range(&data->hw, variant->min_rate, max_rate);
+
+               rate = raspberrypi_fw_get_rate(&data->hw, 0);
+               if (rate < variant->min_rate) {
+                       ret = raspberrypi_fw_set_rate(&data->hw, variant->min_rate, 0);
+                       if (ret)
+                               return ERR_PTR(ret);
+               }
+       }
+
        return &data->hw;
 }
 
@@ -264,27 +357,27 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
                return ret;
 
        while (clks->id) {
-               struct clk_hw *hw;
-
-               switch (clks->id) {
-               case RPI_FIRMWARE_ARM_CLK_ID:
-               case RPI_FIRMWARE_CORE_CLK_ID:
-               case RPI_FIRMWARE_M2MC_CLK_ID:
-               case RPI_FIRMWARE_V3D_CLK_ID:
-               case RPI_FIRMWARE_PIXEL_BVB_CLK_ID:
+               struct raspberrypi_clk_variant *variant;
+
+               if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
+                       dev_err(rpi->dev, "Unknown clock id: %u", clks->id);
+                       return -EINVAL;
+               }
+
+               variant = &raspberrypi_clk_variants[clks->id];
+               if (variant->export) {
+                       struct clk_hw *hw;
+
                        hw = raspberrypi_clk_register(rpi, clks->parent,
-                                                     clks->id);
+                                                     clks->id, variant);
                        if (IS_ERR(hw))
                                return PTR_ERR(hw);
 
                        data->hws[clks->id] = hw;
                        data->num = clks->id + 1;
-                       fallthrough;
-
-               default:
-                       clks++;
-                       break;
                }
+
+               clks++;
        }
 
        return 0;
diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
new file mode 100644 (file)
index 0000000..39472a5
--- /dev/null
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Driver for an SoC block (Numerically Controlled Oscillator)
+ * found on t8103 (M1) and other Apple chips
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define NCO_CHANNEL_STRIDE     0x4000
+#define NCO_CHANNEL_REGSIZE    20
+
+#define REG_CTRL       0
+#define CTRL_ENABLE    BIT(31)
+#define REG_DIV                4
+#define DIV_FINE       GENMASK(1, 0)
+#define DIV_COARSE     GENMASK(12, 2)
+#define REG_INC1       8
+#define REG_INC2       12
+#define REG_ACCINIT    16
+
+/*
+ * Theory of operation (postulated)
+ *
+ * The REG_DIV register indirectly expresses a base integer divisor, roughly
+ * corresponding to twice the desired ratio of input to output clock. This
+ * base divisor is adjusted on a cycle-by-cycle basis based on the state of a
+ * 32-bit phase accumulator to achieve a desired precise clock ratio over the
+ * long term.
+ *
+ * Specifically an output clock cycle is produced after (REG_DIV divisor)/2
+ * or (REG_DIV divisor + 1)/2 input cycles, the latter taking effect when top
+ * bit of the 32-bit accumulator is set. The accumulator is incremented each
+ * produced output cycle, by the value from either REG_INC1 or REG_INC2, which
+ * of the two is selected depending again on the accumulator's current top bit.
+ *
+ * Because the NCO hardware implements counting of input clock cycles in part
+ * in a Galois linear-feedback shift register, the higher bits of divisor
+ * are programmed into REG_DIV by picking an appropriate LFSR state. See
+ * applnco_compute_tables/applnco_div_translate for details on this.
+ */
+
+#define LFSR_POLY      0xa01
+#define LFSR_INIT      0x7ff
+#define LFSR_LEN       11
+#define LFSR_PERIOD    ((1 << LFSR_LEN) - 1)
+#define LFSR_TBLSIZE   (1 << LFSR_LEN)
+
+/* The minimal attainable coarse divisor (first value in table) */
+#define COARSE_DIV_OFFSET 2
+
+struct applnco_tables {
+       u16 fwd[LFSR_TBLSIZE];
+       u16 inv[LFSR_TBLSIZE];
+};
+
+struct applnco_channel {
+       void __iomem *base;
+       struct applnco_tables *tbl;
+       struct clk_hw hw;
+
+       spinlock_t lock;
+};
+
+#define to_applnco_channel(_hw) container_of(_hw, struct applnco_channel, hw)
+
+static void applnco_enable_nolock(struct clk_hw *hw)
+{
+       struct applnco_channel *chan = to_applnco_channel(hw);
+       u32 val;
+
+       val = readl_relaxed(chan->base + REG_CTRL);
+       writel_relaxed(val | CTRL_ENABLE, chan->base + REG_CTRL);
+}
+
+static void applnco_disable_nolock(struct clk_hw *hw)
+{
+       struct applnco_channel *chan = to_applnco_channel(hw);
+       u32 val;
+
+       val = readl_relaxed(chan->base + REG_CTRL);
+       writel_relaxed(val & ~CTRL_ENABLE, chan->base + REG_CTRL);
+}
+
+static int applnco_is_enabled(struct clk_hw *hw)
+{
+       struct applnco_channel *chan = to_applnco_channel(hw);
+
+       return (readl_relaxed(chan->base + REG_CTRL) & CTRL_ENABLE) != 0;
+}
+
+static void applnco_compute_tables(struct applnco_tables *tbl)
+{
+       int i;
+       u32 state = LFSR_INIT;
+
+       /*
+        * Go through the states of a Galois LFSR and build
+        * a coarse divisor translation table.
+        */
+       for (i = LFSR_PERIOD; i > 0; i--) {
+               if (state & 1)
+                       state = (state >> 1) ^ (LFSR_POLY >> 1);
+               else
+                       state = (state >> 1);
+               tbl->fwd[i] = state;
+               tbl->inv[state] = i;
+       }
+
+       /* Zero value is special-cased */
+       tbl->fwd[0] = 0;
+       tbl->inv[0] = 0;
+}
+
+static bool applnco_div_out_of_range(unsigned int div)
+{
+       unsigned int coarse = div / 4;
+
+       return coarse < COARSE_DIV_OFFSET ||
+               coarse >= COARSE_DIV_OFFSET + LFSR_TBLSIZE;
+}
+
+static u32 applnco_div_translate(struct applnco_tables *tbl, unsigned int div)
+{
+       unsigned int coarse = div / 4;
+
+       if (WARN_ON(applnco_div_out_of_range(div)))
+               return 0;
+
+       return FIELD_PREP(DIV_COARSE, tbl->fwd[coarse - COARSE_DIV_OFFSET]) |
+                       FIELD_PREP(DIV_FINE, div % 4);
+}
+
+static unsigned int applnco_div_translate_inv(struct applnco_tables *tbl, u32 regval)
+{
+       unsigned int coarse, fine;
+
+       coarse = tbl->inv[FIELD_GET(DIV_COARSE, regval)] + COARSE_DIV_OFFSET;
+       fine = FIELD_GET(DIV_FINE, regval);
+
+       return coarse * 4 + fine;
+}
+
+static int applnco_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct applnco_channel *chan = to_applnco_channel(hw);
+       unsigned long flags;
+       u32 div, inc1, inc2;
+       bool was_enabled;
+
+       div = 2 * parent_rate / rate;
+       inc1 = 2 * parent_rate - div * rate;
+       inc2 = inc1 - rate;
+
+       if (applnco_div_out_of_range(div))
+               return -EINVAL;
+
+       div = applnco_div_translate(chan->tbl, div);
+
+       spin_lock_irqsave(&chan->lock, flags);
+       was_enabled = applnco_is_enabled(hw);
+       applnco_disable_nolock(hw);
+
+       writel_relaxed(div,  chan->base + REG_DIV);
+       writel_relaxed(inc1, chan->base + REG_INC1);
+       writel_relaxed(inc2, chan->base + REG_INC2);
+
+       /* Presumably a neutral initial value for accumulator */
+       writel_relaxed(1 << 31, chan->base + REG_ACCINIT);
+
+       if (was_enabled)
+               applnco_enable_nolock(hw);
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       return 0;
+}
+
+static unsigned long applnco_recalc_rate(struct clk_hw *hw,
+                               unsigned long parent_rate)
+{
+       struct applnco_channel *chan = to_applnco_channel(hw);
+       u32 div, inc1, inc2, incbase;
+
+       div = applnco_div_translate_inv(chan->tbl,
+                       readl_relaxed(chan->base + REG_DIV));
+
+       inc1 = readl_relaxed(chan->base + REG_INC1);
+       inc2 = readl_relaxed(chan->base + REG_INC2);
+
+       /*
+        * We don't support wraparound of accumulator
+        * nor the edge case of both increments being zero
+        */
+       if (inc1 >= (1 << 31) || inc2 < (1 << 31) || (inc1 == 0 && inc2 == 0))
+               return 0;
+
+       /* Scale both sides of division by incbase to maintain precision */
+       incbase = inc1 - inc2;
+
+       return div64_u64(((u64) parent_rate) * 2 * incbase,
+                       ((u64) div) * incbase + inc1);
+}
+
+static long applnco_round_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long *parent_rate)
+{
+       unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
+       unsigned long hi = *parent_rate / COARSE_DIV_OFFSET;
+
+       return clamp(rate, lo, hi);
+}
+
+static int applnco_enable(struct clk_hw *hw)
+{
+       struct applnco_channel *chan = to_applnco_channel(hw);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       applnco_enable_nolock(hw);
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       return 0;
+}
+
+static void applnco_disable(struct clk_hw *hw)
+{
+       struct applnco_channel *chan = to_applnco_channel(hw);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       applnco_disable_nolock(hw);
+       spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static const struct clk_ops applnco_ops = {
+       .set_rate = applnco_set_rate,
+       .recalc_rate = applnco_recalc_rate,
+       .round_rate = applnco_round_rate,
+       .enable = applnco_enable,
+       .disable = applnco_disable,
+       .is_enabled = applnco_is_enabled,
+};
+
+static int applnco_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct clk_parent_data pdata = { .index = 0 };
+       struct clk_init_data init;
+       struct clk_hw_onecell_data *onecell_data;
+       void __iomem *base;
+       struct resource *res;
+       struct applnco_tables *tbl;
+       unsigned int nchannels;
+       int ret, i;
+
+       base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       if (resource_size(res) < NCO_CHANNEL_REGSIZE)
+               return -EINVAL;
+       nchannels = (resource_size(res) - NCO_CHANNEL_REGSIZE)
+                       / NCO_CHANNEL_STRIDE + 1;
+
+       onecell_data = devm_kzalloc(&pdev->dev, struct_size(onecell_data, hws,
+                                                       nchannels), GFP_KERNEL);
+       if (!onecell_data)
+               return -ENOMEM;
+       onecell_data->num = nchannels;
+
+       tbl = devm_kzalloc(&pdev->dev, sizeof(*tbl), GFP_KERNEL);
+       if (!tbl)
+               return -ENOMEM;
+       applnco_compute_tables(tbl);
+
+       for (i = 0; i < nchannels; i++) {
+               struct applnco_channel *chan;
+
+               chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
+               if (!chan)
+                       return -ENOMEM;
+               chan->base = base + NCO_CHANNEL_STRIDE * i;
+               chan->tbl = tbl;
+               spin_lock_init(&chan->lock);
+
+               memset(&init, 0, sizeof(init));
+               init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+                                               "%s-%d", np->name, i);
+               init.ops = &applnco_ops;
+               init.parent_data = &pdata;
+               init.num_parents = 1;
+               init.flags = 0;
+
+               chan->hw.init = &init;
+               ret = devm_clk_hw_register(&pdev->dev, &chan->hw);
+               if (ret)
+                       return ret;
+
+               onecell_data->hws[i] = &chan->hw;
+       }
+
+       return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+                                                       onecell_data);
+}
+
+static const struct of_device_id applnco_ids[] = {
+       { .compatible = "apple,nco" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, applnco_ids);
+
+static struct platform_driver applnco_driver = {
+       .driver = {
+               .name = "apple-nco",
+               .of_match_table = applnco_ids,
+       },
+       .probe = applnco_probe,
+};
+module_platform_driver(applnco_driver);
+
+MODULE_AUTHOR("Martin PoviÅ¡er <povik+lin@cutebit.org>");
+MODULE_DESCRIPTION("Clock driver for NCO blocks on Apple SoCs");
+MODULE_LICENSE("GPL");
index a2c6486..f8417ee 100644 (file)
@@ -28,11 +28,13 @@ static const struct clk_div_table spi_div_table[] = {
        { .val = 1, .div = 8, },
        { .val = 2, .div = 2, },
        { .val = 3, .div = 1, },
+       { /* sentinel */ }
 };
 
 static const struct clk_div_table timer_div_table[] = {
        { .val = 0, .div = 256, },
        { .val = 1, .div = 1, },
+       { /* sentinel */ }
 };
 
 struct clps711x_clk {
index 92bc4ac..dc5040a 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/i2c.h>
 #include <linux/of_device.h>
 #include <linux/module.h>
+#include <linux/regmap.h>
 
 #define CH_MAX 4
 #define RATIO_REG_SIZE 4
@@ -39,6 +40,8 @@
 /* DEVICE_CFG1 */
 #define RSEL(x)                (((x) & 0x3) << 3)
 #define RSEL_MASK      RSEL(0x3)
+#define AUXOUTSRC(x)   (((x) & 0x3) << 1)
+#define AUXOUTSRC_MASK AUXOUTSRC(0x3)
 #define ENDEV1         (0x1)
 
 /* DEVICE_CFG2 */
 #define LOCKCLK_MASK   LOCKCLK(0x3)
 #define FRACNSRC_MASK  (1 << 0)
 #define FRACNSRC_STATIC                (0 << 0)
-#define FRACNSRC_DYNAMIC       (1 << 1)
+#define FRACNSRC_DYNAMIC       (1 << 0)
 
 /* GLOBAL_CFG */
+#define FREEZE         (1 << 7)
 #define ENDEV2         (0x1)
 
 /* FUNC_CFG1 */
 #define REF_CLK        1
 #define CLK_MAX 2
 
+static bool cs2000_readable_reg(struct device *dev, unsigned int reg)
+{
+       return reg > 0;
+}
+
+static bool cs2000_writeable_reg(struct device *dev, unsigned int reg)
+{
+       return reg != DEVICE_ID;
+}
+
+static bool cs2000_volatile_reg(struct device *dev, unsigned int reg)
+{
+       return reg == DEVICE_CTRL;
+}
+
+static const struct regmap_config cs2000_regmap_config = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .max_register   = FUNC_CFG2,
+       .readable_reg   = cs2000_readable_reg,
+       .writeable_reg  = cs2000_writeable_reg,
+       .volatile_reg   = cs2000_volatile_reg,
+};
+
 struct cs2000_priv {
        struct clk_hw hw;
        struct i2c_client *client;
        struct clk *clk_in;
        struct clk *ref_clk;
+       struct regmap *regmap;
+
+       bool dynamic_mode;
+       bool lf_ratio;
+       bool clk_skip;
 
        /* suspend/resume */
        unsigned long saved_rate;
@@ -94,55 +127,30 @@ static const struct i2c_device_id cs2000_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, cs2000_id);
 
-#define cs2000_read(priv, addr) \
-       i2c_smbus_read_byte_data(priv_to_client(priv), addr)
-#define cs2000_write(priv, addr, val) \
-       i2c_smbus_write_byte_data(priv_to_client(priv), addr, val)
-
-static int cs2000_bset(struct cs2000_priv *priv, u8 addr, u8 mask, u8 val)
-{
-       s32 data;
-
-       data = cs2000_read(priv, addr);
-       if (data < 0)
-               return data;
-
-       data &= ~mask;
-       data |= (val & mask);
-
-       return cs2000_write(priv, addr, data);
-}
-
 static int cs2000_enable_dev_config(struct cs2000_priv *priv, bool enable)
 {
        int ret;
 
-       ret = cs2000_bset(priv, DEVICE_CFG1, ENDEV1,
-                         enable ? ENDEV1 : 0);
-       if (ret < 0)
-               return ret;
-
-       ret = cs2000_bset(priv, GLOBAL_CFG,  ENDEV2,
-                         enable ? ENDEV2 : 0);
+       ret = regmap_update_bits(priv->regmap, DEVICE_CFG1, ENDEV1,
+                                enable ? ENDEV1 : 0);
        if (ret < 0)
                return ret;
 
-       ret = cs2000_bset(priv, FUNC_CFG1, CLKSKIPEN,
-                         enable ? CLKSKIPEN : 0);
+       ret = regmap_update_bits(priv->regmap, GLOBAL_CFG,  ENDEV2,
+                                enable ? ENDEV2 : 0);
        if (ret < 0)
                return ret;
 
-       /* FIXME: for Static ratio mode */
-       ret = cs2000_bset(priv, FUNC_CFG2, LFRATIO_MASK,
-                         LFRATIO_12_20);
+       ret = regmap_update_bits(priv->regmap, FUNC_CFG1, CLKSKIPEN,
+                                (enable && priv->clk_skip) ? CLKSKIPEN : 0);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int cs2000_clk_in_bound_rate(struct cs2000_priv *priv,
-                                   u32 rate_in)
+static int cs2000_ref_clk_bound_rate(struct cs2000_priv *priv,
+                                    u32 rate_in)
 {
        u32 val;
 
@@ -155,21 +163,21 @@ static int cs2000_clk_in_bound_rate(struct cs2000_priv *priv,
        else
                return -EINVAL;
 
-       return cs2000_bset(priv, FUNC_CFG1,
-                          REFCLKDIV_MASK,
-                          REFCLKDIV(val));
+       return regmap_update_bits(priv->regmap, FUNC_CFG1,
+                                 REFCLKDIV_MASK,
+                                 REFCLKDIV(val));
 }
 
 static int cs2000_wait_pll_lock(struct cs2000_priv *priv)
 {
        struct device *dev = priv_to_dev(priv);
-       s32 val;
-       unsigned int i;
+       unsigned int i, val;
+       int ret;
 
        for (i = 0; i < 256; i++) {
-               val = cs2000_read(priv, DEVICE_CTRL);
-               if (val < 0)
-                       return val;
+               ret = regmap_read(priv->regmap, DEVICE_CTRL, &val);
+               if (ret < 0)
+                       return ret;
                if (!(val & PLL_UNLOCK))
                        return 0;
                udelay(1);
@@ -183,41 +191,43 @@ static int cs2000_wait_pll_lock(struct cs2000_priv *priv)
 static int cs2000_clk_out_enable(struct cs2000_priv *priv, bool enable)
 {
        /* enable both AUX_OUT, CLK_OUT */
-       return cs2000_bset(priv, DEVICE_CTRL,
-                          (AUXOUTDIS | CLKOUTDIS),
-                          enable ? 0 :
-                          (AUXOUTDIS | CLKOUTDIS));
+       return regmap_update_bits(priv->regmap, DEVICE_CTRL,
+                                 (AUXOUTDIS | CLKOUTDIS),
+                                 enable ? 0 :
+                                 (AUXOUTDIS | CLKOUTDIS));
 }
 
-static u32 cs2000_rate_to_ratio(u32 rate_in, u32 rate_out)
+static u32 cs2000_rate_to_ratio(u32 rate_in, u32 rate_out, bool lf_ratio)
 {
        u64 ratio;
+       u32 multiplier = lf_ratio ? 12 : 20;
 
        /*
-        * ratio = rate_out / rate_in * 2^20
+        * ratio = rate_out / rate_in * 2^multiplier
         *
         * To avoid over flow, rate_out is u64.
         * The result should be u32.
         */
-       ratio = (u64)rate_out << 20;
+       ratio = (u64)rate_out << multiplier;
        do_div(ratio, rate_in);
 
        return ratio;
 }
 
-static unsigned long cs2000_ratio_to_rate(u32 ratio, u32 rate_in)
+static unsigned long cs2000_ratio_to_rate(u32 ratio, u32 rate_in, bool lf_ratio)
 {
        u64 rate_out;
+       u32 multiplier = lf_ratio ? 12 : 20;
 
        /*
-        * ratio = rate_out / rate_in * 2^20
+        * ratio = rate_out / rate_in * 2^multiplier
         *
         * To avoid over flow, rate_out is u64.
         * The result should be u32 or unsigned long.
         */
 
        rate_out = (u64)ratio * rate_in;
-       return rate_out >> 20;
+       return rate_out >> multiplier;
 }
 
 static int cs2000_ratio_set(struct cs2000_priv *priv,
@@ -230,9 +240,9 @@ static int cs2000_ratio_set(struct cs2000_priv *priv,
        if (CH_SIZE_ERR(ch))
                return -EINVAL;
 
-       val = cs2000_rate_to_ratio(rate_in, rate_out);
+       val = cs2000_rate_to_ratio(rate_in, rate_out, priv->lf_ratio);
        for (i = 0; i < RATIO_REG_SIZE; i++) {
-               ret = cs2000_write(priv,
+               ret = regmap_write(priv->regmap,
                                   Ratio_Add(ch, i),
                                   Ratio_Val(val, i));
                if (ret < 0)
@@ -244,14 +254,14 @@ static int cs2000_ratio_set(struct cs2000_priv *priv,
 
 static u32 cs2000_ratio_get(struct cs2000_priv *priv, int ch)
 {
-       s32 tmp;
+       unsigned int tmp, i;
        u32 val;
-       unsigned int i;
+       int ret;
 
        val = 0;
        for (i = 0; i < RATIO_REG_SIZE; i++) {
-               tmp = cs2000_read(priv, Ratio_Add(ch, i));
-               if (tmp < 0)
+               ret = regmap_read(priv->regmap, Ratio_Add(ch, i), &tmp);
+               if (ret < 0)
                        return 0;
 
                val |= Val_Ratio(tmp, i);
@@ -263,22 +273,20 @@ static u32 cs2000_ratio_get(struct cs2000_priv *priv, int ch)
 static int cs2000_ratio_select(struct cs2000_priv *priv, int ch)
 {
        int ret;
+       u8 fracnsrc;
 
        if (CH_SIZE_ERR(ch))
                return -EINVAL;
 
-       /*
-        * FIXME
-        *
-        * this driver supports static ratio mode only at this point.
-        */
-       ret = cs2000_bset(priv, DEVICE_CFG1, RSEL_MASK, RSEL(ch));
+       ret = regmap_update_bits(priv->regmap, DEVICE_CFG1, RSEL_MASK, RSEL(ch));
        if (ret < 0)
                return ret;
 
-       ret = cs2000_bset(priv, DEVICE_CFG2,
-                         (AUTORMOD | LOCKCLK_MASK | FRACNSRC_MASK),
-                         (LOCKCLK(ch) | FRACNSRC_STATIC));
+       fracnsrc = priv->dynamic_mode ? FRACNSRC_DYNAMIC : FRACNSRC_STATIC;
+
+       ret = regmap_update_bits(priv->regmap, DEVICE_CFG2,
+                                AUTORMOD | LOCKCLK_MASK | FRACNSRC_MASK,
+                                LOCKCLK(ch) | fracnsrc);
        if (ret < 0)
                return ret;
 
@@ -294,17 +302,39 @@ static unsigned long cs2000_recalc_rate(struct clk_hw *hw,
 
        ratio = cs2000_ratio_get(priv, ch);
 
-       return cs2000_ratio_to_rate(ratio, parent_rate);
+       return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio);
 }
 
 static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *parent_rate)
 {
+       struct cs2000_priv *priv = hw_to_priv(hw);
        u32 ratio;
 
-       ratio = cs2000_rate_to_ratio(*parent_rate, rate);
+       ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio);
 
-       return cs2000_ratio_to_rate(ratio, *parent_rate);
+       return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio);
+}
+
+static int cs2000_select_ratio_mode(struct cs2000_priv *priv,
+                                   unsigned long rate,
+                                   unsigned long parent_rate)
+{
+       /*
+        * From the datasheet:
+        *
+        * | It is recommended that the 12.20 High-Resolution format be
+        * | utilized whenever the desired ratio is less than 4096 since
+        * | the output frequency accuracy of the PLL is directly proportional
+        * | to the accuracy of the timing reference clock and the resolution
+        * | of the R_UD.
+        *
+        * This mode is only available in dynamic mode.
+        */
+       priv->lf_ratio = priv->dynamic_mode && ((rate / parent_rate) > 4096);
+
+       return regmap_update_bits(priv->regmap, FUNC_CFG2, LFRATIO_MASK,
+                                 priv->lf_ratio ? LFRATIO_20_12 : LFRATIO_12_20);
 }
 
 static int __cs2000_set_rate(struct cs2000_priv *priv, int ch,
@@ -313,7 +343,11 @@ static int __cs2000_set_rate(struct cs2000_priv *priv, int ch,
 {
        int ret;
 
-       ret = cs2000_clk_in_bound_rate(priv, parent_rate);
+       ret = regmap_update_bits(priv->regmap, GLOBAL_CFG, FREEZE, FREEZE);
+       if (ret < 0)
+               return ret;
+
+       ret = cs2000_select_ratio_mode(priv, rate, parent_rate);
        if (ret < 0)
                return ret;
 
@@ -325,6 +359,10 @@ static int __cs2000_set_rate(struct cs2000_priv *priv, int ch,
        if (ret < 0)
                return ret;
 
+       ret = regmap_update_bits(priv->regmap, GLOBAL_CFG, FREEZE, 0);
+       if (ret < 0)
+               return ret;
+
        priv->saved_rate        = rate;
        priv->saved_parent_rate = parent_rate;
 
@@ -380,8 +418,13 @@ static void cs2000_disable(struct clk_hw *hw)
 
 static u8 cs2000_get_parent(struct clk_hw *hw)
 {
-       /* always return REF_CLK */
-       return REF_CLK;
+       struct cs2000_priv *priv = hw_to_priv(hw);
+
+       /*
+        * In dynamic mode, output rates are derived from CLK_IN.
+        * In static mode, CLK_IN is ignored, so we return REF_CLK instead.
+        */
+       return priv->dynamic_mode ? CLK_IN : REF_CLK;
 }
 
 static const struct clk_ops cs2000_ops = {
@@ -421,22 +464,44 @@ static int cs2000_clk_register(struct cs2000_priv *priv)
        struct clk_init_data init;
        const char *name = np->name;
        static const char *parent_names[CLK_MAX];
+       u32 aux_out = 0;
+       int ref_clk_rate;
        int ch = 0; /* it uses ch0 only at this point */
-       int rate;
        int ret;
 
        of_property_read_string(np, "clock-output-names", &name);
 
-       /*
-        * set default rate as 1/1.
-        * otherwise .set_rate which setup ratio
-        * is never called if user requests 1/1 rate
-        */
-       rate = clk_get_rate(priv->ref_clk);
-       ret = __cs2000_set_rate(priv, ch, rate, rate);
+       priv->dynamic_mode = of_property_read_bool(np, "cirrus,dynamic-mode");
+       dev_info(dev, "operating in %s mode\n",
+                priv->dynamic_mode ? "dynamic" : "static");
+
+       of_property_read_u32(np, "cirrus,aux-output-source", &aux_out);
+       ret = regmap_update_bits(priv->regmap, DEVICE_CFG1,
+                                AUXOUTSRC_MASK, AUXOUTSRC(aux_out));
+       if (ret < 0)
+               return ret;
+
+       priv->clk_skip = of_property_read_bool(np, "cirrus,clock-skip");
+
+       ref_clk_rate = clk_get_rate(priv->ref_clk);
+       ret = cs2000_ref_clk_bound_rate(priv, ref_clk_rate);
        if (ret < 0)
                return ret;
 
+       if (priv->dynamic_mode) {
+               /* Default to low-frequency mode to allow for large ratios */
+               priv->lf_ratio = true;
+       } else {
+               /*
+                * set default rate as 1/1.
+                * otherwise .set_rate which setup ratio
+                * is never called if user requests 1/1 rate
+                */
+               ret = __cs2000_set_rate(priv, ch, ref_clk_rate, ref_clk_rate);
+               if (ret < 0)
+                       return ret;
+       }
+
        parent_names[CLK_IN]    = __clk_get_name(priv->clk_in);
        parent_names[REF_CLK]   = __clk_get_name(priv->ref_clk);
 
@@ -464,12 +529,13 @@ static int cs2000_clk_register(struct cs2000_priv *priv)
 static int cs2000_version_print(struct cs2000_priv *priv)
 {
        struct device *dev = priv_to_dev(priv);
-       s32 val;
        const char *revision;
+       unsigned int val;
+       int ret;
 
-       val = cs2000_read(priv, DEVICE_ID);
-       if (val < 0)
-               return val;
+       ret = regmap_read(priv->regmap, DEVICE_ID, &val);
+       if (ret < 0)
+               return ret;
 
        /* CS2000 should be 0x0 */
        if (val >> 3)
@@ -518,6 +584,10 @@ static int cs2000_probe(struct i2c_client *client,
        priv->client = client;
        i2c_set_clientdata(client, priv);
 
+       priv->regmap = devm_regmap_init_i2c(client, &cs2000_regmap_config);
+       if (IS_ERR(priv->regmap))
+               return PTR_ERR(priv->regmap);
+
        ret = cs2000_clk_get(priv);
        if (ret < 0)
                return ret;
index 4e4b6d3..54942d7 100644 (file)
@@ -131,6 +131,28 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
        return hw;
 }
 
+/**
+ * devm_clk_hw_register_fixed_factor_index - Register a fixed factor clock with
+ * parent from DT index
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @index: index of phandle in @dev 'clocks' property
+ * @flags: fixed factor flags
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Return: Pointer to fixed factor clk_hw structure that was registered or
+ * an error pointer.
+ */
+struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
+               const char *name, unsigned int index, unsigned long flags,
+               unsigned int mult, unsigned int div)
+{
+       return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, index,
+                                             flags, mult, div, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_index);
+
 struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
                const char *name, const char *parent_name, unsigned long flags,
                unsigned int mult, unsigned int div)
index 4274540..8efa514 100644 (file)
@@ -34,7 +34,7 @@
  * and assume that the IP, that needs m and n, has also its own
  * prescaler, which is capable to divide by 2^scale. In this way
  * we get the denominator to satisfy the desired range (2) and
- * at the same time much much better result of m and n than simple
+ * at the same time a much better result of m and n than simple
  * saturated values.
  */
 
diff --git a/drivers/clk/clk-gate_test.c b/drivers/clk/clk-gate_test.c
new file mode 100644 (file)
index 0000000..e136aaa
--- /dev/null
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for clk gate basic type
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include <kunit/test.h>
+
+static void clk_gate_register_test_dev(struct kunit *test)
+{
+       struct clk_hw *ret;
+       struct platform_device *pdev;
+
+       pdev = platform_device_register_simple("test_gate_device", -1, NULL, 0);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+       ret = clk_hw_register_gate(&pdev->dev, "test_gate", NULL, 0, NULL,
+                                  0, 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
+       KUNIT_EXPECT_STREQ(test, "test_gate", clk_hw_get_name(ret));
+       KUNIT_EXPECT_EQ(test, 0UL, clk_hw_get_flags(ret));
+
+       clk_hw_unregister_gate(ret);
+       platform_device_put(pdev);
+}
+
+static void clk_gate_register_test_parent_names(struct kunit *test)
+{
+       struct clk_hw *parent;
+       struct clk_hw *ret;
+
+       parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
+                                           1000000);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+       ret = clk_hw_register_gate(NULL, "test_gate", "test_parent", 0, NULL,
+                                  0, 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
+       KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
+
+       clk_hw_unregister_gate(ret);
+       clk_hw_unregister_fixed_rate(parent);
+}
+
+static void clk_gate_register_test_parent_data(struct kunit *test)
+{
+       struct clk_hw *parent;
+       struct clk_hw *ret;
+       struct clk_parent_data pdata = { };
+
+       parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
+                                           1000000);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+       pdata.hw = parent;
+
+       ret = clk_hw_register_gate_parent_data(NULL, "test_gate", &pdata, 0,
+                                              NULL, 0, 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
+       KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
+
+       clk_hw_unregister_gate(ret);
+       clk_hw_unregister_fixed_rate(parent);
+}
+
+static void clk_gate_register_test_parent_data_legacy(struct kunit *test)
+{
+       struct clk_hw *parent;
+       struct clk_hw *ret;
+       struct clk_parent_data pdata = { };
+
+       parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
+                                           1000000);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+       pdata.name = "test_parent";
+
+       ret = clk_hw_register_gate_parent_data(NULL, "test_gate", &pdata, 0,
+                                              NULL, 0, 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
+       KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
+
+       clk_hw_unregister_gate(ret);
+       clk_hw_unregister_fixed_rate(parent);
+}
+
+static void clk_gate_register_test_parent_hw(struct kunit *test)
+{
+       struct clk_hw *parent;
+       struct clk_hw *ret;
+
+       parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
+                                           1000000);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+       ret = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0, NULL,
+                                            0, 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
+       KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
+
+       clk_hw_unregister_gate(ret);
+       clk_hw_unregister_fixed_rate(parent);
+}
+
+static void clk_gate_register_test_hiword_invalid(struct kunit *test)
+{
+       struct clk_hw *ret;
+
+       ret = clk_hw_register_gate(NULL, "test_gate", NULL, 0, NULL,
+                                  20, CLK_GATE_HIWORD_MASK, NULL);
+
+       KUNIT_EXPECT_TRUE(test, IS_ERR(ret));
+}
+
+static struct kunit_case clk_gate_register_test_cases[] = {
+       KUNIT_CASE(clk_gate_register_test_dev),
+       KUNIT_CASE(clk_gate_register_test_parent_names),
+       KUNIT_CASE(clk_gate_register_test_parent_data),
+       KUNIT_CASE(clk_gate_register_test_parent_data_legacy),
+       KUNIT_CASE(clk_gate_register_test_parent_hw),
+       KUNIT_CASE(clk_gate_register_test_hiword_invalid),
+       {}
+};
+
+static struct kunit_suite clk_gate_register_test_suite = {
+       .name = "clk-gate-register-test",
+       .test_cases = clk_gate_register_test_cases,
+};
+
+struct clk_gate_test_context {
+       void __iomem *fake_mem;
+       struct clk_hw *hw;
+       struct clk_hw *parent;
+       u32 fake_reg; /* Keep at end, KASAN can detect out of bounds */
+};
+
+static struct clk_gate_test_context *clk_gate_test_alloc_ctx(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx;
+
+       test->priv = ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+       ctx->fake_mem = (void __force __iomem *)&ctx->fake_reg;
+
+       return ctx;
+}
+
+static void clk_gate_test_parent_rate(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+       struct clk_hw *parent = ctx->parent;
+       struct clk_hw *hw = ctx->hw;
+       unsigned long prate = clk_hw_get_rate(parent);
+       unsigned long rate = clk_hw_get_rate(hw);
+
+       KUNIT_EXPECT_EQ(test, prate, rate);
+}
+
+static void clk_gate_test_enable(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+       struct clk_hw *parent = ctx->parent;
+       struct clk_hw *hw = ctx->hw;
+       struct clk *clk = hw->clk;
+       u32 enable_val = BIT(5);
+
+       KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
+
+       KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg);
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(parent));
+}
+
+static void clk_gate_test_disable(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+       struct clk_hw *parent = ctx->parent;
+       struct clk_hw *hw = ctx->hw;
+       struct clk *clk = hw->clk;
+       u32 enable_val = BIT(5);
+       u32 disable_val = 0;
+
+       KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
+       KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg);
+
+       clk_disable_unprepare(clk);
+       KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg);
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(parent));
+}
+
+static struct kunit_case clk_gate_test_cases[] = {
+       KUNIT_CASE(clk_gate_test_parent_rate),
+       KUNIT_CASE(clk_gate_test_enable),
+       KUNIT_CASE(clk_gate_test_disable),
+       {}
+};
+
+static int clk_gate_test_init(struct kunit *test)
+{
+       struct clk_hw *parent;
+       struct clk_hw *hw;
+       struct clk_gate_test_context *ctx;
+
+       ctx = clk_gate_test_alloc_ctx(test);
+       parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
+                                           2000000);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+       hw = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0,
+                                           ctx->fake_mem, 5, 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+       ctx->hw = hw;
+       ctx->parent = parent;
+
+       return 0;
+}
+
+static void clk_gate_test_exit(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+
+       clk_hw_unregister_gate(ctx->hw);
+       clk_hw_unregister_fixed_rate(ctx->parent);
+}
+
+static struct kunit_suite clk_gate_test_suite = {
+       .name = "clk-gate-test",
+       .init = clk_gate_test_init,
+       .exit = clk_gate_test_exit,
+       .test_cases = clk_gate_test_cases,
+};
+
+static void clk_gate_test_invert_enable(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+       struct clk_hw *parent = ctx->parent;
+       struct clk_hw *hw = ctx->hw;
+       struct clk *clk = hw->clk;
+       u32 enable_val = 0;
+
+       KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
+
+       KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg);
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(parent));
+}
+
+static void clk_gate_test_invert_disable(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+       struct clk_hw *parent = ctx->parent;
+       struct clk_hw *hw = ctx->hw;
+       struct clk *clk = hw->clk;
+       u32 enable_val = 0;
+       u32 disable_val = BIT(15);
+
+       KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
+       KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg);
+
+       clk_disable_unprepare(clk);
+       KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg);
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(parent));
+}
+
+static struct kunit_case clk_gate_test_invert_cases[] = {
+       KUNIT_CASE(clk_gate_test_invert_enable),
+       KUNIT_CASE(clk_gate_test_invert_disable),
+       {}
+};
+
+static int clk_gate_test_invert_init(struct kunit *test)
+{
+       struct clk_hw *parent;
+       struct clk_hw *hw;
+       struct clk_gate_test_context *ctx;
+
+       ctx = clk_gate_test_alloc_ctx(test);
+       parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
+                                           2000000);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+       ctx->fake_reg = BIT(15); /* Default to off */
+       hw = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0,
+                                           ctx->fake_mem, 15,
+                                           CLK_GATE_SET_TO_DISABLE, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+       ctx->hw = hw;
+       ctx->parent = parent;
+
+       return 0;
+}
+
+static struct kunit_suite clk_gate_test_invert_suite = {
+       .name = "clk-gate-invert-test",
+       .init = clk_gate_test_invert_init,
+       .exit = clk_gate_test_exit,
+       .test_cases = clk_gate_test_invert_cases,
+};
+
+static void clk_gate_test_hiword_enable(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+       struct clk_hw *parent = ctx->parent;
+       struct clk_hw *hw = ctx->hw;
+       struct clk *clk = hw->clk;
+       u32 enable_val = BIT(9) | BIT(9 + 16);
+
+       KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
+
+       KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg);
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent));
+       KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(parent));
+}
+
+static void clk_gate_test_hiword_disable(struct kunit *test)
+{
+       struct clk_gate_test_context *ctx = test->priv;
+       struct clk_hw *parent = ctx->parent;
+       struct clk_hw *hw = ctx->hw;
+       struct clk *clk = hw->clk;
+       u32 enable_val = BIT(9) | BIT(9 + 16);
+       u32 disable_val = BIT(9 + 16);
+
+       KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
+       KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg);
+
+       clk_disable_unprepare(clk);
+       KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg);
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent));
+       KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(parent));
+}
+
+static struct kunit_case clk_gate_test_hiword_cases[] = {
+       KUNIT_CASE(clk_gate_test_hiword_enable),
+       KUNIT_CASE(clk_gate_test_hiword_disable),
+       {}
+};
+
+static int clk_gate_test_hiword_init(struct kunit *test)
+{
+       struct clk_hw *parent;
+       struct clk_hw *hw;
+       struct clk_gate_test_context *ctx;
+
+       ctx = clk_gate_test_alloc_ctx(test);
+       parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
+                                           2000000);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+       hw = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0,
+                                           ctx->fake_mem, 9,
+                                           CLK_GATE_HIWORD_MASK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+       ctx->hw = hw;
+       ctx->parent = parent;
+
+       return 0;
+}
+
+static struct kunit_suite clk_gate_test_hiword_suite = {
+       .name = "clk-gate-hiword-test",
+       .init = clk_gate_test_hiword_init,
+       .exit = clk_gate_test_exit,
+       .test_cases = clk_gate_test_hiword_cases,
+};
+
+static void clk_gate_test_is_enabled(struct kunit *test)
+{
+       struct clk_hw *hw;
+       struct clk_gate_test_context *ctx;
+
+       ctx = clk_gate_test_alloc_ctx(test);
+       ctx->fake_reg = BIT(7);
+       hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 7,
+                                 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+       KUNIT_ASSERT_TRUE(test, clk_hw_is_enabled(hw));
+
+       clk_hw_unregister_gate(hw);
+}
+
+static void clk_gate_test_is_disabled(struct kunit *test)
+{
+       struct clk_hw *hw;
+       struct clk_gate_test_context *ctx;
+
+       ctx = clk_gate_test_alloc_ctx(test);
+       ctx->fake_reg = BIT(4);
+       hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 7,
+                                 0, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+       KUNIT_ASSERT_FALSE(test, clk_hw_is_enabled(hw));
+
+       clk_hw_unregister_gate(hw);
+}
+
+static void clk_gate_test_is_enabled_inverted(struct kunit *test)
+{
+       struct clk_hw *hw;
+       struct clk_gate_test_context *ctx;
+
+       ctx = clk_gate_test_alloc_ctx(test);
+       ctx->fake_reg = BIT(31);
+       hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 2,
+                                 CLK_GATE_SET_TO_DISABLE, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+       KUNIT_ASSERT_TRUE(test, clk_hw_is_enabled(hw));
+
+       clk_hw_unregister_gate(hw);
+}
+
+static void clk_gate_test_is_disabled_inverted(struct kunit *test)
+{
+       struct clk_hw *hw;
+       struct clk_gate_test_context *ctx;
+
+       ctx = clk_gate_test_alloc_ctx(test);
+       ctx->fake_reg = BIT(29);
+       hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 29,
+                                 CLK_GATE_SET_TO_DISABLE, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+       KUNIT_ASSERT_FALSE(test, clk_hw_is_enabled(hw));
+
+       clk_hw_unregister_gate(hw);
+}
+
+static struct kunit_case clk_gate_test_enabled_cases[] = {
+       KUNIT_CASE(clk_gate_test_is_enabled),
+       KUNIT_CASE(clk_gate_test_is_disabled),
+       KUNIT_CASE(clk_gate_test_is_enabled_inverted),
+       KUNIT_CASE(clk_gate_test_is_disabled_inverted),
+       {}
+};
+
+static struct kunit_suite clk_gate_test_enabled_suite = {
+       .name = "clk-gate-is_enabled-test",
+       .test_cases = clk_gate_test_enabled_cases,
+};
+
+kunit_test_suites(
+       &clk_gate_register_test_suite,
+       &clk_gate_test_suite,
+       &clk_gate_test_invert_suite,
+       &clk_gate_test_hiword_suite,
+       &clk_gate_test_enabled_suite
+);
+MODULE_LICENSE("GPL v2");
index 20582aa..214045f 100644 (file)
@@ -40,7 +40,7 @@ static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
                writel(val, mux->reg);
 }
 
-int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
                         unsigned int val)
 {
        int num_parents = clk_hw_get_num_parents(hw);
@@ -67,7 +67,7 @@ int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
 }
 EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
 
-unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
+unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index)
 {
        unsigned int val = index;
 
@@ -152,7 +152,7 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
                const struct clk_hw **parent_hws,
                const struct clk_parent_data *parent_data,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
-               u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+               u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
 {
        struct clk_mux *mux;
        struct clk_hw *hw;
@@ -218,7 +218,7 @@ struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node
                const struct clk_hw **parent_hws,
                const struct clk_parent_data *parent_data,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
-               u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+               u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
 {
        struct clk_hw **ptr, *hw;
 
@@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux);
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
                const char * const *parent_names, u8 num_parents,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
-               u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+               u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
 {
        struct clk_hw *hw;
 
index 78d5ea6..cda5e25 100644 (file)
@@ -209,15 +209,11 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        const struct oxnas_stdclk_data *data;
-       const struct of_device_id *id;
        struct regmap *regmap;
        int ret;
        int i;
 
-       id = of_match_device(oxnas_stdclk_dt_ids, &pdev->dev);
-       if (!id)
-               return -ENODEV;
-       data = id->data;
+       data = of_device_get_match_data(&pdev->dev);
 
        regmap = syscon_node_to_regmap(of_get_parent(np));
        if (IS_ERR(regmap)) {
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
new file mode 100644 (file)
index 0000000..59d9cf0
--- /dev/null
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the Renesas 9-series PCIe clock generators
+ *
+ * The following series can be supported:
+ *   - 9FGV/9DBV/9DMV/9FGL/9DML/9QXL/9SQ
+ * Currently supported:
+ *   - 9FGV0241
+ *
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define RS9_REG_OE                             0x0
+#define RS9_REG_OE_DIF_OE(n)                   BIT((n) + 1)
+#define RS9_REG_SS                             0x1
+#define RS9_REG_SS_AMP_0V6                     0x0
+#define RS9_REG_SS_AMP_0V7                     0x1
+#define RS9_REG_SS_AMP_0V8                     0x2
+#define RS9_REG_SS_AMP_0V9                     0x3
+#define RS9_REG_SS_AMP_MASK                    0x3
+#define RS9_REG_SS_SSC_100                     0
+#define RS9_REG_SS_SSC_M025                    (1 << 3)
+#define RS9_REG_SS_SSC_M050                    (3 << 3)
+#define RS9_REG_SS_SSC_MASK                    (3 << 3)
+#define RS9_REG_SS_SSC_LOCK                    BIT(5)
+#define RS9_REG_SR                             0x2
+#define RS9_REG_SR_2V0_DIF(n)                  0
+#define RS9_REG_SR_3V0_DIF(n)                  BIT((n) + 1)
+#define RS9_REG_SR_DIF_MASK(n)         BIT((n) + 1)
+#define RS9_REG_REF                            0x3
+#define RS9_REG_REF_OE                         BIT(4)
+#define RS9_REG_REF_OD                         BIT(5)
+#define RS9_REG_REF_SR_SLOWEST                 0
+#define RS9_REG_REF_SR_SLOW                    (1 << 6)
+#define RS9_REG_REF_SR_FAST                    (2 << 6)
+#define RS9_REG_REF_SR_FASTER                  (3 << 6)
+#define RS9_REG_VID                            0x5
+#define RS9_REG_DID                            0x6
+#define RS9_REG_BCP                            0x7
+
+/* Supported Renesas 9-series models. */
+enum rs9_model {
+       RENESAS_9FGV0241,
+};
+
+/* Structure to describe features of a particular 9-series model */
+struct rs9_chip_info {
+       const enum rs9_model    model;
+       unsigned int            num_clks;
+};
+
+struct rs9_driver_data {
+       struct i2c_client       *client;
+       struct regmap           *regmap;
+       const struct rs9_chip_info *chip_info;
+       struct clk              *pin_xin;
+       struct clk_hw           *clk_dif[2];
+       u8                      pll_amplitude;
+       u8                      pll_ssc;
+       u8                      clk_dif_sr;
+};
+
+/*
+ * Renesas 9-series i2c regmap
+ */
+static const struct regmap_range rs9_readable_ranges[] = {
+       regmap_reg_range(RS9_REG_OE, RS9_REG_REF),
+       regmap_reg_range(RS9_REG_VID, RS9_REG_BCP),
+};
+
+static const struct regmap_access_table rs9_readable_table = {
+       .yes_ranges = rs9_readable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(rs9_readable_ranges),
+};
+
+static const struct regmap_range rs9_writeable_ranges[] = {
+       regmap_reg_range(RS9_REG_OE, RS9_REG_REF),
+       regmap_reg_range(RS9_REG_BCP, RS9_REG_BCP),
+};
+
+static const struct regmap_access_table rs9_writeable_table = {
+       .yes_ranges = rs9_writeable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(rs9_writeable_ranges),
+};
+
+static const struct regmap_config rs9_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_FLAT,
+       .max_register = 0x8,
+       .rd_table = &rs9_readable_table,
+       .wr_table = &rs9_writeable_table,
+};
+
+static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx)
+{
+       struct i2c_client *client = rs9->client;
+       unsigned char name[5] = "DIF0";
+       struct device_node *np;
+       int ret;
+       u32 sr;
+
+       /* Set defaults */
+       rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
+       rs9->clk_dif_sr |= RS9_REG_SR_3V0_DIF(idx);
+
+       snprintf(name, 5, "DIF%d", idx);
+       np = of_get_child_by_name(client->dev.of_node, name);
+       if (!np)
+               return 0;
+
+       /* Output clock slew rate */
+       ret = of_property_read_u32(np, "renesas,slew-rate", &sr);
+       of_node_put(np);
+       if (!ret) {
+               if (sr == 2000000) {            /* 2V/ns */
+                       rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
+                       rs9->clk_dif_sr |= RS9_REG_SR_2V0_DIF(idx);
+               } else if (sr == 3000000) {     /* 3V/ns (default) */
+                       rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
+                       rs9->clk_dif_sr |= RS9_REG_SR_3V0_DIF(idx);
+               } else
+                       ret = dev_err_probe(&client->dev, -EINVAL,
+                                           "Invalid renesas,slew-rate value\n");
+       }
+
+       return ret;
+}
+
+static int rs9_get_common_config(struct rs9_driver_data *rs9)
+{
+       struct i2c_client *client = rs9->client;
+       struct device_node *np = client->dev.of_node;
+       unsigned int amp, ssc;
+       int ret;
+
+       /* Set defaults */
+       rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
+       rs9->pll_ssc = RS9_REG_SS_SSC_100;
+
+       /* Output clock amplitude */
+       ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt",
+                                  &amp);
+       if (!ret) {
+               if (amp == 600000)      /* 0.6V */
+                       rs9->pll_amplitude = RS9_REG_SS_AMP_0V6;
+               else if (amp == 700000) /* 0.7V (default) */
+                       rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
+               else if (amp == 800000) /* 0.8V */
+                       rs9->pll_amplitude = RS9_REG_SS_AMP_0V8;
+               else if (amp == 900000) /* 0.9V */
+                       rs9->pll_amplitude = RS9_REG_SS_AMP_0V9;
+               else
+                       return dev_err_probe(&client->dev, -EINVAL,
+                                            "Invalid renesas,out-amplitude-microvolt value\n");
+       }
+
+       /* Output clock spread spectrum */
+       ret = of_property_read_u32(np, "renesas,out-spread-spectrum", &ssc);
+       if (!ret) {
+               if (ssc == 100000)      /* 100% ... no spread (default) */
+                       rs9->pll_ssc = RS9_REG_SS_SSC_100;
+               else if (ssc == 99750)  /* -0.25% ... down spread */
+                       rs9->pll_ssc = RS9_REG_SS_SSC_M025;
+               else if (ssc == 99500)  /* -0.50% ... down spread */
+                       rs9->pll_ssc = RS9_REG_SS_SSC_M050;
+               else
+                       return dev_err_probe(&client->dev, -EINVAL,
+                                            "Invalid renesas,out-spread-spectrum value\n");
+       }
+
+       return 0;
+}
+
+static void rs9_update_config(struct rs9_driver_data *rs9)
+{
+       int i;
+
+       /* If amplitude is non-default, update it. */
+       if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) {
+               regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK,
+                                  rs9->pll_amplitude);
+       }
+
+       /* If SSC is non-default, update it. */
+       if (rs9->pll_ssc != RS9_REG_SS_SSC_100) {
+               regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK,
+                                  rs9->pll_ssc);
+       }
+
+       for (i = 0; i < rs9->chip_info->num_clks; i++) {
+               if (rs9->clk_dif_sr & RS9_REG_SR_3V0_DIF(i))
+                       continue;
+
+               regmap_update_bits(rs9->regmap, RS9_REG_SR, RS9_REG_SR_3V0_DIF(i),
+                                  rs9->clk_dif_sr & RS9_REG_SR_3V0_DIF(i));
+       }
+}
+
+static struct clk_hw *
+rs9_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+       struct rs9_driver_data *rs9 = data;
+       unsigned int idx = clkspec->args[0];
+
+       return rs9->clk_dif[idx];
+}
+
+static int rs9_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       unsigned char name[5] = "DIF0";
+       struct rs9_driver_data *rs9;
+       struct clk_hw *hw;
+       int i, ret;
+
+       rs9 = devm_kzalloc(&client->dev, sizeof(*rs9), GFP_KERNEL);
+       if (!rs9)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, rs9);
+       rs9->client = client;
+       rs9->chip_info = device_get_match_data(&client->dev);
+       if (!rs9->chip_info)
+               return -EINVAL;
+
+       /* Fetch common configuration from DT (if specified) */
+       ret = rs9_get_common_config(rs9);
+       if (ret)
+               return ret;
+
+       /* Fetch DIFx output configuration from DT (if specified) */
+       for (i = 0; i < rs9->chip_info->num_clks; i++) {
+               ret = rs9_get_output_config(rs9, i);
+               if (ret)
+                       return ret;
+       }
+
+       rs9->regmap = devm_regmap_init_i2c(client, &rs9_regmap_config);
+       if (IS_ERR(rs9->regmap))
+               return dev_err_probe(&client->dev, PTR_ERR(rs9->regmap),
+                                    "Failed to allocate register map\n");
+
+       /* Register clock */
+       for (i = 0; i < rs9->chip_info->num_clks; i++) {
+               snprintf(name, 5, "DIF%d", i);
+               hw = devm_clk_hw_register_fixed_factor_index(&client->dev, name,
+                                                   0, 0, 4, 1);
+               if (IS_ERR(hw))
+                       return PTR_ERR(hw);
+
+               rs9->clk_dif[i] = hw;
+       }
+
+       ret = devm_of_clk_add_hw_provider(&client->dev, rs9_of_clk_get, rs9);
+       if (!ret)
+               rs9_update_config(rs9);
+
+       return ret;
+}
+
+static int __maybe_unused rs9_suspend(struct device *dev)
+{
+       struct rs9_driver_data *rs9 = dev_get_drvdata(dev);
+
+       regcache_cache_only(rs9->regmap, true);
+       regcache_mark_dirty(rs9->regmap);
+
+       return 0;
+}
+
+static int __maybe_unused rs9_resume(struct device *dev)
+{
+       struct rs9_driver_data *rs9 = dev_get_drvdata(dev);
+       int ret;
+
+       regcache_cache_only(rs9->regmap, false);
+       ret = regcache_sync(rs9->regmap);
+       if (ret)
+               dev_err(dev, "Failed to restore register map: %d\n", ret);
+       return ret;
+}
+
+static const struct rs9_chip_info renesas_9fgv0241_info = {
+       .model          = RENESAS_9FGV0241,
+       .num_clks       = 2,
+};
+
+static const struct i2c_device_id rs9_id[] = {
+       { "9fgv0241", .driver_data = RENESAS_9FGV0241 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, rs9_id);
+
+static const struct of_device_id clk_rs9_of_match[] = {
+       { .compatible = "renesas,9fgv0241", .data = &renesas_9fgv0241_info },
+       { }
+};
+MODULE_DEVICE_TABLE(of, clk_rs9_of_match);
+
+static SIMPLE_DEV_PM_OPS(rs9_pm_ops, rs9_suspend, rs9_resume);
+
+static struct i2c_driver rs9_driver = {
+       .driver = {
+               .name = "clk-renesas-pcie-9series",
+               .pm     = &rs9_pm_ops,
+               .of_match_table = clk_rs9_of_match,
+       },
+       .probe          = rs9_probe,
+       .id_table       = rs9_id,
+};
+module_i2c_driver(rs9_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Renesas 9-series PCIe clock generator driver");
+MODULE_LICENSE("GPL");
index f7b4136..41851f4 100644 (file)
@@ -655,7 +655,7 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
        f = synth->data->freq_vco;
        f *= n_den >> 4;
 
-       /* Now we need to to 64-bit division: f/n_num */
+       /* Now we need to do 64-bit division: f/n_num */
        /* And compensate for the 4 bits we dropped */
        f = div64_u64(f, (n_num >> 4));
 
@@ -798,6 +798,15 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw,
        u32 r_divider;
        u8 r[3];
 
+       err = regmap_read(output->data->regmap,
+                       SI5341_OUT_CONFIG(output), &val);
+       if (err < 0)
+               return err;
+
+       /* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */
+       if (val & SI5341_OUT_CFG_RDIV_FORCE2)
+               return parent_rate / 2;
+
        err = regmap_bulk_read(output->data->regmap,
                        SI5341_OUT_R_REG(output), r, 3);
        if (err < 0)
@@ -814,13 +823,6 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw,
        r_divider += 1;
        r_divider <<= 1;
 
-       err = regmap_read(output->data->regmap,
-                       SI5341_OUT_CONFIG(output), &val);
-       if (err < 0)
-               return err;
-
-       if (val & SI5341_OUT_CFG_RDIV_FORCE2)
-               r_divider = 2;
 
        return parent_rate / r_divider;
 }
@@ -1468,7 +1470,7 @@ static ssize_t input_present_show(struct device *dev,
        if (res < 0)
                return res;
        res = !(status & SI5341_STATUS_LOSREF);
-       return snprintf(buf, PAGE_SIZE, "%d\n", res);
+       return sysfs_emit(buf, "%d\n", res);
 }
 static DEVICE_ATTR_RO(input_present);
 
@@ -1483,7 +1485,7 @@ static ssize_t input_present_sticky_show(struct device *dev,
        if (res < 0)
                return res;
        res = !(status & SI5341_STATUS_LOSREF);
-       return snprintf(buf, PAGE_SIZE, "%d\n", res);
+       return sysfs_emit(buf, "%d\n", res);
 }
 static DEVICE_ATTR_RO(input_present_sticky);
 
@@ -1498,7 +1500,7 @@ static ssize_t pll_locked_show(struct device *dev,
        if (res < 0)
                return res;
        res = !(status & SI5341_STATUS_LOL);
-       return snprintf(buf, PAGE_SIZE, "%d\n", res);
+       return sysfs_emit(buf, "%d\n", res);
 }
 static DEVICE_ATTR_RO(pll_locked);
 
@@ -1513,7 +1515,7 @@ static ssize_t pll_locked_sticky_show(struct device *dev,
        if (res < 0)
                return res;
        res = !(status & SI5341_STATUS_LOL);
-       return snprintf(buf, PAGE_SIZE, "%d\n", res);
+       return sysfs_emit(buf, "%d\n", res);
 }
 static DEVICE_ATTR_RO(pll_locked_sticky);
 
index 863274a..7ad2e62 100644 (file)
@@ -155,6 +155,10 @@ static const char * const eth_src[] = {
        "pll4_p", "pll3_q"
 };
 
+const struct clk_parent_data ethrx_src[] = {
+       { .name = "ethck_k", .fw_name = "ETH_RX_CLK/ETH_REF_CLK" },
+};
+
 static const char * const rng_src[] = {
        "ck_csi", "pll4_r", "ck_lse", "ck_lsi"
 };
@@ -317,6 +321,7 @@ struct clock_config {
        const char *name;
        const char *parent_name;
        const char * const *parent_names;
+       const struct clk_parent_data *parent_data;
        int num_parents;
        unsigned long flags;
        void *cfg;
@@ -576,6 +581,7 @@ static struct clk_hw *
 clk_stm32_register_gate_ops(struct device *dev,
                            const char *name,
                            const char *parent_name,
+                           const struct clk_parent_data *parent_data,
                            unsigned long flags,
                            void __iomem *base,
                            const struct stm32_gate_cfg *cfg,
@@ -586,7 +592,10 @@ clk_stm32_register_gate_ops(struct device *dev,
        int ret;
 
        init.name = name;
-       init.parent_names = &parent_name;
+       if (parent_name)
+               init.parent_names = &parent_name;
+       if (parent_data)
+               init.parent_data = parent_data;
        init.num_parents = 1;
        init.flags = flags;
 
@@ -611,6 +620,7 @@ clk_stm32_register_gate_ops(struct device *dev,
 static struct clk_hw *
 clk_stm32_register_composite(struct device *dev,
                             const char *name, const char * const *parent_names,
+                            const struct clk_parent_data *parent_data,
                             int num_parents, void __iomem *base,
                             const struct stm32_composite_cfg *cfg,
                             unsigned long flags, spinlock_t *lock)
@@ -1135,6 +1145,7 @@ _clk_stm32_register_gate(struct device *dev,
        return clk_stm32_register_gate_ops(dev,
                                    cfg->name,
                                    cfg->parent_name,
+                                   cfg->parent_data,
                                    cfg->flags,
                                    base,
                                    cfg->cfg,
@@ -1148,8 +1159,8 @@ _clk_stm32_register_composite(struct device *dev,
                              const struct clock_config *cfg)
 {
        return clk_stm32_register_composite(dev, cfg->name, cfg->parent_names,
-                                           cfg->num_parents, base, cfg->cfg,
-                                           cfg->flags, lock);
+                                           cfg->parent_data, cfg->num_parents,
+                                           base, cfg->cfg, cfg->flags, lock);
 }
 
 #define GATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
@@ -1258,6 +1269,16 @@ _clk_stm32_register_composite(struct device *dev,
        .func           = _clk_stm32_register_gate,\
 }
 
+#define STM32_GATE_PDATA(_id, _name, _parent, _flags, _gate)\
+{\
+       .id             = _id,\
+       .name           = _name,\
+       .parent_data    = _parent,\
+       .flags          = _flags,\
+       .cfg            = (struct stm32_gate_cfg *) {_gate},\
+       .func           = _clk_stm32_register_gate,\
+}
+
 #define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
        (&(struct stm32_gate_cfg) {\
                &(struct gate_cfg) {\
@@ -1291,6 +1312,10 @@ _clk_stm32_register_composite(struct device *dev,
        STM32_GATE(_id, _name, _parent, _flags,\
                   _STM32_MGATE(_mgate))
 
+#define MGATE_MP1_PDATA(_id, _name, _parent, _flags, _mgate)\
+       STM32_GATE_PDATA(_id, _name, _parent, _flags,\
+                  _STM32_MGATE(_mgate))
+
 #define _STM32_DIV(_div_offset, _div_shift, _div_width,\
                   _div_flags, _div_table, _ops)\
        .div = &(struct stm32_div_cfg) {\
@@ -1354,6 +1379,9 @@ _clk_stm32_register_composite(struct device *dev,
 #define PCLK(_id, _name, _parent, _flags, _mgate)\
        MGATE_MP1(_id, _name, _parent, _flags, _mgate)
 
+#define PCLK_PDATA(_id, _name, _parent, _flags, _mgate)\
+       MGATE_MP1_PDATA(_id, _name, _parent, _flags, _mgate)
+
 #define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
             COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
                       CLK_SET_RATE_NO_REPARENT | _flags,\
@@ -1951,7 +1979,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
        PCLK(MDMA, "mdma", "ck_axi", 0, G_MDMA),
        PCLK(GPU, "gpu", "ck_axi", 0, G_GPU),
        PCLK(ETHTX, "ethtx", "ck_axi", 0, G_ETHTX),
-       PCLK(ETHRX, "ethrx", "ck_axi", 0, G_ETHRX),
+       PCLK_PDATA(ETHRX, "ethrx", ethrx_src, 0, G_ETHRX),
        PCLK(ETHMAC, "ethmac", "ck_axi", 0, G_ETHMAC),
        PCLK(FMC, "fmc", "ck_axi", CLK_IGNORE_UNUSED, G_FMC),
        PCLK(QSPI, "qspi", "ck_axi", CLK_IGNORE_UNUSED, G_QSPI),
@@ -2008,7 +2036,6 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
        KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI),
        KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
        KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
-       KCLK(ETHCK_K, "ethck_k", eth_src, 0, G_ETHCK, M_ETHCK),
 
        /* Particulary Kernel Clocks (no mux or no gate) */
        MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
@@ -2017,11 +2044,16 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
        MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
        MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
 
-       COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE |
+       COMPOSITE(NO_ID, "ck_ker_eth", eth_src, CLK_OPS_PARENT_ENABLE |
                  CLK_SET_RATE_NO_REPARENT,
                  _NO_GATE,
                  _MMUX(M_ETHCK),
-                 _DIV(RCC_ETHCKSELR, 4, 4, 0, NULL)),
+                 _NO_DIV),
+
+       MGATE_MP1(ETHCK_K, "ethck_k", "ck_ker_eth", 0, G_ETHCK),
+
+       DIV(ETHPTP_K, "ethptp_k", "ck_ker_eth", CLK_OPS_PARENT_ENABLE |
+           CLK_SET_RATE_NO_REPARENT, RCC_ETHCKSELR, 4, 4, 0),
 
        /* RTC clock */
        COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE,
index 8de6a22..07a27b6 100644 (file)
@@ -37,7 +37,7 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
-static struct hlist_head *all_lists[] = {
+static const struct hlist_head *all_lists[] = {
        &clk_root_list,
        &clk_orphan_list,
        NULL,
@@ -632,6 +632,24 @@ static void clk_core_get_boundaries(struct clk_core *core,
                *max_rate = min(*max_rate, clk_user->max_rate);
 }
 
+static bool clk_core_check_boundaries(struct clk_core *core,
+                                     unsigned long min_rate,
+                                     unsigned long max_rate)
+{
+       struct clk *user;
+
+       lockdep_assert_held(&prepare_lock);
+
+       if (min_rate > core->max_rate || max_rate < core->min_rate)
+               return false;
+
+       hlist_for_each_entry(user, &core->clks, clks_node)
+               if (min_rate > user->max_rate || max_rate < user->min_rate)
+                       return false;
+
+       return true;
+}
+
 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
                           unsigned long max_rate)
 {
@@ -1330,6 +1348,8 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
        if (!core)
                return 0;
 
+       req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+
        /*
         * At this point, core protection will be disabled
         * - if the provider is not protected at all
@@ -2312,19 +2332,15 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
 }
 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
 
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+static int clk_set_rate_range_nolock(struct clk *clk,
+                                    unsigned long min,
+                                    unsigned long max)
 {
        int ret = 0;
        unsigned long old_min, old_max, rate;
 
+       lockdep_assert_held(&prepare_lock);
+
        if (!clk)
                return 0;
 
@@ -2337,8 +2353,6 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
                return -EINVAL;
        }
 
-       clk_prepare_lock();
-
        if (clk->exclusive_count)
                clk_core_rate_unprotect(clk->core);
 
@@ -2348,37 +2362,62 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
        clk->min_rate = min;
        clk->max_rate = max;
 
-       rate = clk_core_get_rate_nolock(clk->core);
-       if (rate < min || rate > max) {
-               /*
-                * FIXME:
-                * We are in bit of trouble here, current rate is outside the
-                * the requested range. We are going try to request appropriate
-                * range boundary but there is a catch. It may fail for the
-                * usual reason (clock broken, clock protected, etc) but also
-                * because:
-                * - round_rate() was not favorable and fell on the wrong
-                *   side of the boundary
-                * - the determine_rate() callback does not really check for
-                *   this corner case when determining the rate
-                */
-
-               if (rate < min)
-                       rate = min;
-               else
-                       rate = max;
+       if (!clk_core_check_boundaries(clk->core, min, max)) {
+               ret = -EINVAL;
+               goto out;
+       }
 
-               ret = clk_core_set_rate_nolock(clk->core, rate);
-               if (ret) {
-                       /* rollback the changes */
-                       clk->min_rate = old_min;
-                       clk->max_rate = old_max;
-               }
+       /*
+        * Since the boundaries have been changed, let's give the
+        * opportunity to the provider to adjust the clock rate based on
+        * the new boundaries.
+        *
+        * We also need to handle the case where the clock is currently
+        * outside of the boundaries. Clamping the last requested rate
+        * to the current minimum and maximum will also handle this.
+        *
+        * FIXME:
+        * There is a catch. It may fail for the usual reason (clock
+        * broken, clock protected, etc) but also because:
+        * - round_rate() was not favorable and fell on the wrong
+        *   side of the boundary
+        * - the determine_rate() callback does not really check for
+        *   this corner case when determining the rate
+        */
+       rate = clamp(clk->core->req_rate, min, max);
+       ret = clk_core_set_rate_nolock(clk->core, rate);
+       if (ret) {
+               /* rollback the changes */
+               clk->min_rate = old_min;
+               clk->max_rate = old_max;
        }
 
+out:
        if (clk->exclusive_count)
                clk_core_rate_protect(clk->core);
 
+       return ret;
+}
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Return: 0 for success or negative errno on failure.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+       int ret;
+
+       if (!clk)
+               return 0;
+
+       clk_prepare_lock();
+
+       ret = clk_set_rate_range_nolock(clk, min, max);
+
        clk_prepare_unlock();
 
        return ret;
@@ -3456,6 +3495,19 @@ static void clk_core_reparent_orphans_nolock(void)
                        __clk_set_parent_after(orphan, parent, NULL);
                        __clk_recalc_accuracies(orphan);
                        __clk_recalc_rates(orphan, 0);
+
+                       /*
+                        * __clk_init_parent() will set the initial req_rate to
+                        * 0 if the clock doesn't have clk_ops::recalc_rate and
+                        * is an orphan when it's registered.
+                        *
+                        * 'req_rate' is used by clk_set_rate_range() and
+                        * clk_put() to trigger a clk_set_rate() call whenever
+                        * the boundaries are modified. Let's make sure
+                        * 'req_rate' is set to something non-zero so that
+                        * clk_set_rate_range() doesn't drop the frequency.
+                        */
+                       orphan->req_rate = orphan->rate;
                }
        }
 }
@@ -3773,8 +3825,9 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
 {
        struct device *dev = hw->core->dev;
+       const char *name = dev ? dev_name(dev) : NULL;
 
-       return clk_hw_create_clk(dev, hw, dev_name(dev), con_id);
+       return clk_hw_create_clk(dev, hw, name, con_id);
 }
 EXPORT_SYMBOL(clk_hw_get_clk);
 
@@ -4079,7 +4132,7 @@ static const struct clk_ops clk_nodrv_ops = {
 };
 
 static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
-                                               struct clk_core *target)
+                                               const struct clk_core *target)
 {
        int i;
        struct clk_core *child;
@@ -4095,7 +4148,7 @@ static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
 /* Remove this clk from all parent caches */
 static void clk_core_evict_parent_cache(struct clk_core *core)
 {
-       struct hlist_head **lists;
+       const struct hlist_head **lists;
        struct clk_core *root;
 
        lockdep_assert_held(&prepare_lock);
@@ -4366,9 +4419,7 @@ void __clk_put(struct clk *clk)
        }
 
        hlist_del(&clk->clks_node);
-       if (clk->min_rate > clk->core->req_rate ||
-           clk->max_rate < clk->core->req_rate)
-               clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+       clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
 
        owner = clk->core->owner;
        kref_put(&clk->core->ref, __clk_release);
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
new file mode 100644 (file)
index 0000000..fd2339c
--- /dev/null
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for clk rate management
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+/* Needed for clk_hw_get_clk() */
+#include "clk.h"
+
+#include <kunit/test.h>
+
+#define DUMMY_CLOCK_INIT_RATE  (42 * 1000 * 1000)
+#define DUMMY_CLOCK_RATE_1     (142 * 1000 * 1000)
+#define DUMMY_CLOCK_RATE_2     (242 * 1000 * 1000)
+
+struct clk_dummy_context {
+       struct clk_hw hw;
+       unsigned long rate;
+};
+
+static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
+                                          unsigned long parent_rate)
+{
+       struct clk_dummy_context *ctx =
+               container_of(hw, struct clk_dummy_context, hw);
+
+       return ctx->rate;
+}
+
+static int clk_dummy_determine_rate(struct clk_hw *hw,
+                                   struct clk_rate_request *req)
+{
+       /* Just return the same rate without modifying it */
+       return 0;
+}
+
+static int clk_dummy_maximize_rate(struct clk_hw *hw,
+                                  struct clk_rate_request *req)
+{
+       /*
+        * If there's a maximum set, always run the clock at the maximum
+        * allowed.
+        */
+       if (req->max_rate < ULONG_MAX)
+               req->rate = req->max_rate;
+
+       return 0;
+}
+
+static int clk_dummy_minimize_rate(struct clk_hw *hw,
+                                  struct clk_rate_request *req)
+{
+       /*
+        * If there's a minimum set, always run the clock at the minimum
+        * allowed.
+        */
+       if (req->min_rate > 0)
+               req->rate = req->min_rate;
+
+       return 0;
+}
+
+static int clk_dummy_set_rate(struct clk_hw *hw,
+                             unsigned long rate,
+                             unsigned long parent_rate)
+{
+       struct clk_dummy_context *ctx =
+               container_of(hw, struct clk_dummy_context, hw);
+
+       ctx->rate = rate;
+       return 0;
+}
+
+static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
+{
+       if (index >= clk_hw_get_num_parents(hw))
+               return -EINVAL;
+
+       return 0;
+}
+
+static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
+{
+       return 0;
+}
+
+static const struct clk_ops clk_dummy_rate_ops = {
+       .recalc_rate = clk_dummy_recalc_rate,
+       .determine_rate = clk_dummy_determine_rate,
+       .set_rate = clk_dummy_set_rate,
+};
+
+static const struct clk_ops clk_dummy_maximize_rate_ops = {
+       .recalc_rate = clk_dummy_recalc_rate,
+       .determine_rate = clk_dummy_maximize_rate,
+       .set_rate = clk_dummy_set_rate,
+};
+
+static const struct clk_ops clk_dummy_minimize_rate_ops = {
+       .recalc_rate = clk_dummy_recalc_rate,
+       .determine_rate = clk_dummy_minimize_rate,
+       .set_rate = clk_dummy_set_rate,
+};
+
+static const struct clk_ops clk_dummy_single_parent_ops = {
+       .set_parent = clk_dummy_single_set_parent,
+       .get_parent = clk_dummy_single_get_parent,
+};
+
+static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
+{
+       struct clk_dummy_context *ctx;
+       struct clk_init_data init = { };
+       int ret;
+
+       ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       ctx->rate = DUMMY_CLOCK_INIT_RATE;
+       test->priv = ctx;
+
+       init.name = "test_dummy_rate";
+       init.ops = ops;
+       ctx->hw.init = &init;
+
+       ret = clk_hw_register(NULL, &ctx->hw);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int clk_test_init(struct kunit *test)
+{
+       return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
+}
+
+static int clk_maximize_test_init(struct kunit *test)
+{
+       return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
+}
+
+static int clk_minimize_test_init(struct kunit *test)
+{
+       return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
+}
+
+static void clk_test_exit(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+
+       clk_hw_unregister(&ctx->hw);
+}
+
+/*
+ * Test that the actual rate matches what is returned by clk_get_rate()
+ */
+static void clk_test_get_rate(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, ctx->rate);
+}
+
+/*
+ * Test that, after a call to clk_set_rate(), the rate returned by
+ * clk_get_rate() matches.
+ *
+ * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
+ * modify the requested rate, which is our case in clk_dummy_rate_ops.
+ */
+static void clk_test_set_get_rate(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+}
+
+/*
+ * Test that, after several calls to clk_set_rate(), the rate returned
+ * by clk_get_rate() matches the last one.
+ *
+ * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
+ * modify the requested rate, which is our case in clk_dummy_rate_ops.
+ */
+static void clk_test_set_set_get_rate(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+/*
+ * Test that clk_round_rate and clk_set_rate are consitent and will
+ * return the same frequency.
+ */
+static void clk_test_round_set_get_rate(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rounded_rate, set_rate;
+
+       rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
+       KUNIT_ASSERT_GT(test, rounded_rate, 0);
+       KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
+                       0);
+
+       set_rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, set_rate, 0);
+       KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
+}
+
+static struct kunit_case clk_test_cases[] = {
+       KUNIT_CASE(clk_test_get_rate),
+       KUNIT_CASE(clk_test_set_get_rate),
+       KUNIT_CASE(clk_test_set_set_get_rate),
+       KUNIT_CASE(clk_test_round_set_get_rate),
+       {}
+};
+
+static struct kunit_suite clk_test_suite = {
+       .name = "clk-test",
+       .init = clk_test_init,
+       .exit = clk_test_exit,
+       .test_cases = clk_test_cases,
+};
+
+struct clk_single_parent_ctx {
+       struct clk_dummy_context parent_ctx;
+       struct clk_hw hw;
+};
+
+static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
+{
+       struct clk_single_parent_ctx *ctx;
+       struct clk_init_data init = { };
+       const char * const parents[] = { "orphan_parent" };
+       int ret;
+
+       ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       test->priv = ctx;
+
+       init.name = "test_orphan_dummy_parent";
+       init.ops = &clk_dummy_single_parent_ops;
+       init.parent_names = parents;
+       init.num_parents = ARRAY_SIZE(parents);
+       init.flags = CLK_SET_RATE_PARENT;
+       ctx->hw.init = &init;
+
+       ret = clk_hw_register(NULL, &ctx->hw);
+       if (ret)
+               return ret;
+
+       memset(&init, 0, sizeof(init));
+       init.name = "orphan_parent";
+       init.ops = &clk_dummy_rate_ops;
+       ctx->parent_ctx.hw.init = &init;
+       ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
+
+       ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test)
+{
+       struct clk_single_parent_ctx *ctx = test->priv;
+
+       clk_hw_unregister(&ctx->hw);
+       clk_hw_unregister(&ctx->parent_ctx.hw);
+}
+
+/*
+ * Test that a mux-only clock, with an initial rate within a range,
+ * will still have the same rate after the range has been enforced.
+ */
+static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
+{
+       struct clk_single_parent_ctx *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate, new_rate;
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          ctx->parent_ctx.rate - 1000,
+                                          ctx->parent_ctx.rate + 1000),
+                       0);
+
+       new_rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, new_rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, new_rate);
+}
+
+static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
+       KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
+       {}
+};
+
+static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
+       .name = "clk-orphan-transparent-single-parent-test",
+       .init = clk_orphan_transparent_single_parent_mux_test_init,
+       .exit = clk_orphan_transparent_single_parent_mux_test_exit,
+       .test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
+};
+
+/*
+ * Test that clk_set_rate_range won't return an error for a valid range
+ * and that it will make sure the rate of the clock is within the
+ * boundaries.
+ */
+static void clk_range_test_set_range(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+       KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+/*
+ * Test that calling clk_set_rate_range with a minimum rate higher than
+ * the maximum rate returns an error.
+ */
+static void clk_range_test_set_range_invalid(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+
+       KUNIT_EXPECT_LT(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1 + 1000,
+                                          DUMMY_CLOCK_RATE_1),
+                       0);
+}
+
+/*
+ * Test that users can't set multiple, disjoints, range that would be
+ * impossible to meet.
+ */
+static void clk_range_test_multiple_disjoints_range(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *user1, *user2;
+
+       user1 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+       user2 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user1, 1000, 2000),
+                       0);
+
+       KUNIT_EXPECT_LT(test,
+                       clk_set_rate_range(user2, 3000, 4000),
+                       0);
+
+       clk_put(user2);
+       clk_put(user1);
+}
+
+/*
+ * Test that if our clock has some boundaries and we try to round a rate
+ * lower than the minimum, the returned rate will be within range.
+ */
+static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+       KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+/*
+ * Test that if our clock has some boundaries and we try to set a rate
+ * higher than the maximum, the new rate will be within range.
+ */
+static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+       KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+/*
+ * Test that if our clock has some boundaries and we try to round and
+ * set a rate lower than the minimum, the rate returned by
+ * clk_round_rate() will be consistent with the new rate set by
+ * clk_set_rate().
+ */
+static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       long rounded;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+       KUNIT_ASSERT_GT(test, rounded, 0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
+                       0);
+
+       KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
+}
+
+/*
+ * Test that if our clock has some boundaries and we try to round a rate
+ * higher than the maximum, the returned rate will be within range.
+ */
+static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+       KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+/*
+ * Test that if our clock has some boundaries and we try to set a rate
+ * higher than the maximum, the new rate will be within range.
+ */
+static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+       KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+/*
+ * Test that if our clock has some boundaries and we try to round and
+ * set a rate higher than the maximum, the rate returned by
+ * clk_round_rate() will be consistent with the new rate set by
+ * clk_set_rate().
+ */
+static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       long rounded;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
+       KUNIT_ASSERT_GT(test, rounded, 0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+                       0);
+
+       KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
+}
+
+/*
+ * Test that if our clock has a rate lower than the minimum set by a
+ * call to clk_set_rate_range(), the rate will be raised to match the
+ * new minimum.
+ *
+ * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
+ * modify the requested rate, which is our case in clk_dummy_rate_ops.
+ */
+static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+}
+
+/*
+ * Test that if our clock has a rate higher than the maximum set by a
+ * call to clk_set_rate_range(), the rate will be lowered to match the
+ * new maximum.
+ *
+ * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
+ * modify the requested rate, which is our case in clk_dummy_rate_ops.
+ */
+static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+static struct kunit_case clk_range_test_cases[] = {
+       KUNIT_CASE(clk_range_test_set_range),
+       KUNIT_CASE(clk_range_test_set_range_invalid),
+       KUNIT_CASE(clk_range_test_multiple_disjoints_range),
+       KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
+       KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
+       KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
+       KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
+       KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
+       KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
+       KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
+       KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
+       {}
+};
+
+static struct kunit_suite clk_range_test_suite = {
+       .name = "clk-range-test",
+       .init = clk_test_init,
+       .exit = clk_test_exit,
+       .test_cases = clk_range_test_cases,
+};
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), the core will reevaluate whether a new rate is
+ * needed each and every time.
+ *
+ * With clk_dummy_maximize_rate_ops, this means that the rate will
+ * trail along the maximum as it evolves.
+ */
+static void clk_range_test_set_range_rate_maximized(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2 - 1000),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed each and every time.
+ *
+ * With clk_dummy_maximize_rate_ops, this means that the rate will
+ * trail along the maximum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       struct clk *user1, *user2;
+       unsigned long rate;
+
+       user1 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+       user2 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user1,
+                                          0,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user2,
+                                          0,
+                                          DUMMY_CLOCK_RATE_1),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_drop_range(user2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+       clk_put(user2);
+       clk_put(user1);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drop its clock.
+ *
+ * With clk_dummy_maximize_rate_ops, this means that the rate will
+ * trail along the maximum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       struct clk *user1, *user2;
+       unsigned long rate;
+
+       user1 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+       user2 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user1,
+                                          0,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user2,
+                                          0,
+                                          DUMMY_CLOCK_RATE_1),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+       clk_put(user2);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+       clk_put(user1);
+}
+
+static struct kunit_case clk_range_maximize_test_cases[] = {
+       KUNIT_CASE(clk_range_test_set_range_rate_maximized),
+       KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
+       KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
+       {}
+};
+
+static struct kunit_suite clk_range_maximize_test_suite = {
+       .name = "clk-range-maximize-test",
+       .init = clk_maximize_test_init,
+       .exit = clk_test_exit,
+       .test_cases = clk_range_maximize_test_cases,
+};
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), the core will reevaluate whether a new rate is
+ * needed each and every time.
+ *
+ * With clk_dummy_minimize_rate_ops, this means that the rate will
+ * trail along the minimum as it evolves.
+ */
+static void clk_range_test_set_range_rate_minimized(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       unsigned long rate;
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
+                       0);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1 + 1000,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(clk,
+                                          DUMMY_CLOCK_RATE_1,
+                                          DUMMY_CLOCK_RATE_2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed each and every time.
+ *
+ * With clk_dummy_minimize_rate_ops, this means that the rate will
+ * trail along the minimum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       struct clk *user1, *user2;
+       unsigned long rate;
+
+       user1 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+       user2 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user1,
+                                          DUMMY_CLOCK_RATE_1,
+                                          ULONG_MAX),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user2,
+                                          DUMMY_CLOCK_RATE_2,
+                                          ULONG_MAX),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_drop_range(user2),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+       clk_put(user2);
+       clk_put(user1);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drop its clock.
+ *
+ * With clk_dummy_minimize_rate_ops, this means that the rate will
+ * trail along the minimum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
+{
+       struct clk_dummy_context *ctx = test->priv;
+       struct clk_hw *hw = &ctx->hw;
+       struct clk *clk = hw->clk;
+       struct clk *user1, *user2;
+       unsigned long rate;
+
+       user1 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+       user2 = clk_hw_get_clk(hw, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user1,
+                                          DUMMY_CLOCK_RATE_1,
+                                          ULONG_MAX),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+       KUNIT_ASSERT_EQ(test,
+                       clk_set_rate_range(user2,
+                                          DUMMY_CLOCK_RATE_2,
+                                          ULONG_MAX),
+                       0);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+       clk_put(user2);
+
+       rate = clk_get_rate(clk);
+       KUNIT_ASSERT_GT(test, rate, 0);
+       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+       clk_put(user1);
+}
+
+static struct kunit_case clk_range_minimize_test_cases[] = {
+       KUNIT_CASE(clk_range_test_set_range_rate_minimized),
+       KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
+       KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
+       {}
+};
+
+static struct kunit_suite clk_range_minimize_test_suite = {
+       .name = "clk-range-minimize-test",
+       .init = clk_minimize_test_init,
+       .exit = clk_test_exit,
+       .test_cases = clk_range_minimize_test_cases,
+};
+
+kunit_test_suites(
+       &clk_test_suite,
+       &clk_orphan_transparent_single_parent_test_suite,
+       &clk_range_test_suite,
+       &clk_range_maximize_test_suite,
+       &clk_range_minimize_test_suite
+);
+MODULE_LICENSE("GPL v2");
index 56012a3..9ea1a80 100644 (file)
@@ -611,8 +611,8 @@ static struct hisi_mux_clock hi3559av100_shub_mux_clks[] = {
 
 
 /* shub div clk */
-static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}};
-static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}};
+static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}, {/*sentinel*/}};
+static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}, {/*sentinel*/}};
 
 static struct hisi_divider_clock hi3559av100_shub_div_clks[] = {
        { HI3559AV100_SHUB_SPI_SOURCE_CLK, "clk_spi_clk", "shub_clk", 0, 0x20, 24, 2,
index 9361fba..54d9fdc 100644 (file)
@@ -162,7 +162,7 @@ int hisi_clk_register_mux(const struct hisi_mux_clock *clks,
                                        clks[i].num_parents, clks[i].flags,
                                        base + clks[i].offset, clks[i].shift,
                                        mask, clks[i].mux_flags,
-                                       (u32 *)clks[i].table, &hisi_clk_lock);
+                                       clks[i].table, &hisi_clk_lock);
                if (IS_ERR(clk)) {
                        pr_err("%s: failed to register clock %s\n",
                               __func__, clks[i].name);
index c08edbd..25785ec 100644 (file)
@@ -105,3 +105,17 @@ config CLK_IMX8ULP
        select MXC_CLK
        help
            Build the driver for i.MX8ULP CCM Clock Driver
+
+config CLK_IMX93
+       tristate "IMX93 CCM Clock Driver"
+       depends on ARCH_MXC || COMPILE_TEST
+       select MXC_CLK
+       help
+           Build the driver for i.MX93 CCM Clock Driver
+
+config CLK_IMXRT1050
+       tristate "IMXRT1050 CCM Clock Driver"
+       depends on SOC_IMXRT
+       select MXC_CLK
+       help
+           Build the driver for i.MXRT1050 CCM Clock Driver
index b5e0400..88b9b92 100644 (file)
@@ -4,6 +4,8 @@ mxc-clk-objs += clk.o
 mxc-clk-objs += clk-busy.o
 mxc-clk-objs += clk-composite-7ulp.o
 mxc-clk-objs += clk-composite-8m.o
+mxc-clk-objs += clk-composite-93.o
+mxc-clk-objs += clk-fracn-gppll.o
 mxc-clk-objs += clk-cpu.o
 mxc-clk-objs += clk-divider-gate.o
 mxc-clk-objs += clk-fixup-div.o
@@ -26,9 +28,12 @@ obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
 obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o
 obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
 
+obj-$(CONFIG_CLK_IMX93) += clk-imx93.o
+
 obj-$(CONFIG_MXC_CLK_SCU) += clk-imx-scu.o clk-imx-lpcg-scu.o
 clk-imx-scu-$(CONFIG_CLK_IMX8QXP) += clk-scu.o clk-imx8qxp.o \
-                                    clk-imx8qxp-rsrc.o clk-imx8qm-rsrc.o
+                                    clk-imx8qxp-rsrc.o clk-imx8qm-rsrc.o \
+                                    clk-imx8dxl-rsrc.o
 clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
 
 obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp.o
@@ -46,4 +51,5 @@ obj-$(CONFIG_CLK_IMX6SX) += clk-imx6sx.o
 obj-$(CONFIG_CLK_IMX6UL) += clk-imx6ul.o
 obj-$(CONFIG_CLK_IMX7D)  += clk-imx7d.o
 obj-$(CONFIG_CLK_IMX7ULP) += clk-imx7ulp.o
+obj-$(CONFIG_CLK_IMXRT1050)  += clk-imxrt1050.o
 obj-$(CONFIG_CLK_VF610)  += clk-vf610.o
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
new file mode 100644 (file)
index 0000000..b44619a
--- /dev/null
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define CCM_DIV_SHIFT  0
+#define CCM_DIV_WIDTH  8
+#define CCM_MUX_SHIFT  8
+#define CCM_MUX_MASK   3
+#define CCM_OFF_SHIFT  24
+
+#define AUTHEN_OFFSET  0x30
+#define TZ_NS_SHIFT    9
+#define TZ_NS_MASK     BIT(9)
+
+struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
+                                        int num_parents, void __iomem *reg,
+                                        unsigned long flags)
+{
+       struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
+       struct clk_hw *div_hw, *gate_hw;
+       struct clk_divider *div = NULL;
+       struct clk_gate *gate = NULL;
+       struct clk_mux *mux = NULL;
+       bool clk_ro = false;
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               goto fail;
+
+       mux_hw = &mux->hw;
+       mux->reg = reg;
+       mux->shift = CCM_MUX_SHIFT;
+       mux->mask = CCM_MUX_MASK;
+       mux->lock = &imx_ccm_lock;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               goto fail;
+
+       div_hw = &div->hw;
+       div->reg = reg;
+       div->shift = CCM_DIV_SHIFT;
+       div->width = CCM_DIV_WIDTH;
+       div->lock = &imx_ccm_lock;
+       div->flags = CLK_DIVIDER_ROUND_CLOSEST;
+
+       if (!(readl(reg + AUTHEN_OFFSET) & TZ_NS_MASK))
+               clk_ro = true;
+
+       if (clk_ro) {
+               hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+                                              mux_hw, &clk_mux_ro_ops, div_hw,
+                                              &clk_divider_ro_ops, NULL, NULL, flags);
+       } else {
+               gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+               if (!gate)
+                       goto fail;
+
+               gate_hw = &gate->hw;
+               gate->reg = reg;
+               gate->bit_idx = CCM_OFF_SHIFT;
+               gate->lock = &imx_ccm_lock;
+               gate->flags = CLK_GATE_SET_TO_DISABLE;
+
+               hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+                                              mux_hw, &clk_mux_ops, div_hw,
+                                              &clk_divider_ops, gate_hw,
+                                              &clk_gate_ops, flags | CLK_SET_RATE_NO_REPARENT);
+       }
+
+       if (IS_ERR(hw))
+               goto fail;
+
+       return hw;
+
+fail:
+       kfree(gate);
+       kfree(div);
+       kfree(mux);
+       return ERR_CAST(hw);
+}
+EXPORT_SYMBOL_GPL(imx93_clk_composite_flags);
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
new file mode 100644 (file)
index 0000000..71c102d
--- /dev/null
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define PLL_CTRL               0x0
+#define CLKMUX_BYPASS          BIT(2)
+#define CLKMUX_EN              BIT(1)
+#define POWERUP_MASK           BIT(0)
+
+#define PLL_ANA_PRG            0x10
+#define PLL_SPREAD_SPECTRUM    0x30
+
+#define PLL_NUMERATOR          0x40
+#define PLL_MFN_MASK           GENMASK(31, 2)
+
+#define PLL_DENOMINATOR                0x50
+#define PLL_MFD_MASK           GENMASK(29, 0)
+
+#define PLL_DIV                        0x60
+#define PLL_MFI_MASK           GENMASK(24, 16)
+#define PLL_RDIV_MASK          GENMASK(15, 13)
+#define PLL_ODIV_MASK          GENMASK(7, 0)
+
+#define PLL_DFS_CTRL(x)                (0x70 + (x) * 0x10)
+
+#define PLL_STATUS             0xF0
+#define LOCK_STATUS            BIT(0)
+
+#define DFS_STATUS             0xF4
+
+#define LOCK_TIMEOUT_US                200
+
+#define PLL_FRACN_GP(_rate, _mfi, _mfn, _mfd, _rdiv, _odiv)    \
+       {                                                       \
+               .rate   =       (_rate),                        \
+               .mfi    =       (_mfi),                         \
+               .mfn    =       (_mfn),                         \
+               .mfd    =       (_mfd),                         \
+               .rdiv   =       (_rdiv),                        \
+               .odiv   =       (_odiv),                        \
+       }
+
+struct clk_fracn_gppll {
+       struct clk_hw                   hw;
+       void __iomem                    *base;
+       const struct imx_fracn_gppll_rate_table *rate_table;
+       int rate_count;
+};
+
+/*
+ * Fvco = Fref * (MFI + MFN / MFD)
+ * Fout = Fvco / (rdiv * odiv)
+ */
+static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
+       PLL_FRACN_GP(650000000U, 81, 0, 0, 0, 3),
+       PLL_FRACN_GP(594000000U, 198, 0, 0, 0, 8),
+       PLL_FRACN_GP(560000000U, 70, 0, 0, 0, 3),
+       PLL_FRACN_GP(400000000U, 50, 0, 0, 0, 3),
+       PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
+};
+
+struct imx_fracn_gppll_clk imx_fracn_gppll = {
+       .rate_table = fracn_tbl,
+       .rate_count = ARRAY_SIZE(fracn_tbl),
+};
+EXPORT_SYMBOL_GPL(imx_fracn_gppll);
+
+static inline struct clk_fracn_gppll *to_clk_fracn_gppll(struct clk_hw *hw)
+{
+       return container_of(hw, struct clk_fracn_gppll, hw);
+}
+
+static const struct imx_fracn_gppll_rate_table *
+imx_get_pll_settings(struct clk_fracn_gppll *pll, unsigned long rate)
+{
+       const struct imx_fracn_gppll_rate_table *rate_table = pll->rate_table;
+       int i;
+
+       for (i = 0; i < pll->rate_count; i++)
+               if (rate == rate_table[i].rate)
+                       return &rate_table[i];
+
+       return NULL;
+}
+
+static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long *prate)
+{
+       struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
+       const struct imx_fracn_gppll_rate_table *rate_table = pll->rate_table;
+       int i;
+
+       /* Assuming rate_table is in descending order */
+       for (i = 0; i < pll->rate_count; i++)
+               if (rate >= rate_table[i].rate)
+                       return rate_table[i].rate;
+
+       /* return minimum supported value */
+       return rate_table[pll->rate_count - 1].rate;
+}
+
+static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+       struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
+       const struct imx_fracn_gppll_rate_table *rate_table = pll->rate_table;
+       u32 pll_numerator, pll_denominator, pll_div;
+       u32 mfi, mfn, mfd, rdiv, odiv;
+       u64 fvco = parent_rate;
+       long rate = 0;
+       int i;
+
+       pll_numerator = readl_relaxed(pll->base + PLL_NUMERATOR);
+       mfn = FIELD_GET(PLL_MFN_MASK, pll_numerator);
+
+       pll_denominator = readl_relaxed(pll->base + PLL_DENOMINATOR);
+       mfd = FIELD_GET(PLL_MFD_MASK, pll_denominator);
+
+       pll_div = readl_relaxed(pll->base + PLL_DIV);
+       mfi = FIELD_GET(PLL_MFI_MASK, pll_div);
+
+       rdiv = FIELD_GET(PLL_RDIV_MASK, pll_div);
+       rdiv = rdiv + 1;
+       odiv = FIELD_GET(PLL_ODIV_MASK, pll_div);
+       switch (odiv) {
+       case 0:
+               odiv = 2;
+               break;
+       case 1:
+               odiv = 3;
+               break;
+       default:
+               break;
+       }
+
+       /*
+        * Sometimes, the recalculated rate has deviation due to
+        * the frac part. So find the accurate pll rate from the table
+        * first, if no match rate in the table, use the rate calculated
+        * from the equation below.
+        */
+       for (i = 0; i < pll->rate_count; i++) {
+               if (rate_table[i].mfn == mfn && rate_table[i].mfi == mfi &&
+                   rate_table[i].mfd == mfd && rate_table[i].rdiv == rdiv &&
+                   rate_table[i].odiv == odiv)
+                       rate = rate_table[i].rate;
+       }
+
+       if (rate)
+               return (unsigned long)rate;
+
+       /* Fvco = Fref * (MFI + MFN / MFD) */
+       fvco = fvco * mfi * mfd + fvco * mfn;
+       do_div(fvco, mfd * rdiv * odiv);
+
+       return (unsigned long)fvco;
+}
+
+static int clk_fracn_gppll_wait_lock(struct clk_fracn_gppll *pll)
+{
+       u32 val;
+
+       return readl_poll_timeout(pll->base + PLL_STATUS, val,
+                                 val & LOCK_STATUS, 0, LOCK_TIMEOUT_US);
+}
+
+static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
+                                   unsigned long prate)
+{
+       struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
+       const struct imx_fracn_gppll_rate_table *rate;
+       u32 tmp, pll_div, ana_mfn;
+       int ret;
+
+       rate = imx_get_pll_settings(pll, drate);
+
+       /* Disable output */
+       tmp = readl_relaxed(pll->base + PLL_CTRL);
+       tmp &= ~CLKMUX_EN;
+       writel_relaxed(tmp, pll->base + PLL_CTRL);
+
+       /* Power Down */
+       tmp &= ~POWERUP_MASK;
+       writel_relaxed(tmp, pll->base + PLL_CTRL);
+
+       /* Disable BYPASS */
+       tmp &= ~CLKMUX_BYPASS;
+       writel_relaxed(tmp, pll->base + PLL_CTRL);
+
+       pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv |
+               FIELD_PREP(PLL_MFI_MASK, rate->mfi);
+       writel_relaxed(pll_div, pll->base + PLL_DIV);
+       writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR);
+       writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR);
+
+       /* Wait for 5us according to fracn mode pll doc */
+       udelay(5);
+
+       /* Enable Powerup */
+       tmp |= POWERUP_MASK;
+       writel_relaxed(tmp, pll->base + PLL_CTRL);
+
+       /* Wait Lock */
+       ret = clk_fracn_gppll_wait_lock(pll);
+       if (ret)
+               return ret;
+
+       /* Enable output */
+       tmp |= CLKMUX_EN;
+       writel_relaxed(tmp, pll->base + PLL_CTRL);
+
+       ana_mfn = readl_relaxed(pll->base + PLL_STATUS);
+       ana_mfn = FIELD_GET(PLL_MFN_MASK, ana_mfn);
+
+       WARN(ana_mfn != rate->mfn, "ana_mfn != rate->mfn\n");
+
+       return 0;
+}
+
+static int clk_fracn_gppll_prepare(struct clk_hw *hw)
+{
+       struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
+       u32 val;
+       int ret;
+
+       val = readl_relaxed(pll->base + PLL_CTRL);
+       if (val & POWERUP_MASK)
+               return 0;
+
+       val |= CLKMUX_BYPASS;
+       writel_relaxed(val, pll->base + PLL_CTRL);
+
+       val |= POWERUP_MASK;
+       writel_relaxed(val, pll->base + PLL_CTRL);
+
+       val |= CLKMUX_EN;
+       writel_relaxed(val, pll->base + PLL_CTRL);
+
+       ret = clk_fracn_gppll_wait_lock(pll);
+       if (ret)
+               return ret;
+
+       val &= ~CLKMUX_BYPASS;
+       writel_relaxed(val, pll->base + PLL_CTRL);
+
+       return 0;
+}
+
+static int clk_fracn_gppll_is_prepared(struct clk_hw *hw)
+{
+       struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
+       u32 val;
+
+       val = readl_relaxed(pll->base + PLL_CTRL);
+
+       return (val & POWERUP_MASK) ? 1 : 0;
+}
+
+static void clk_fracn_gppll_unprepare(struct clk_hw *hw)
+{
+       struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
+       u32 val;
+
+       val = readl_relaxed(pll->base + PLL_CTRL);
+       val &= ~POWERUP_MASK;
+       writel_relaxed(val, pll->base + PLL_CTRL);
+}
+
+static const struct clk_ops clk_fracn_gppll_ops = {
+       .prepare        = clk_fracn_gppll_prepare,
+       .unprepare      = clk_fracn_gppll_unprepare,
+       .is_prepared    = clk_fracn_gppll_is_prepared,
+       .recalc_rate    = clk_fracn_gppll_recalc_rate,
+       .round_rate     = clk_fracn_gppll_round_rate,
+       .set_rate       = clk_fracn_gppll_set_rate,
+};
+
+struct clk_hw *imx_clk_fracn_gppll(const char *name, const char *parent_name, void __iomem *base,
+                                  const struct imx_fracn_gppll_clk *pll_clk)
+{
+       struct clk_fracn_gppll *pll;
+       struct clk_hw *hw;
+       struct clk_init_data init;
+       int ret;
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.flags = pll_clk->flags;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+       init.ops = &clk_fracn_gppll_ops;
+
+       pll->base = base;
+       pll->hw.init = &init;
+       pll->rate_table = pll_clk->rate_table;
+       pll->rate_count = pll_clk->rate_count;
+
+       hw = &pll->hw;
+
+       ret = clk_hw_register(NULL, hw);
+       if (ret) {
+               pr_err("%s: failed to register pll %s %d\n", __func__, name, ret);
+               kfree(pll);
+               return ERR_PTR(ret);
+       }
+
+       return hw;
+}
+EXPORT_SYMBOL_GPL(imx_clk_fracn_gppll);
index c4e0f1c..3f6fd7e 100644 (file)
@@ -849,7 +849,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
        hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0);
        hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0);
        hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
-       hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
        hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
        hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0);
        hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0);
diff --git a/drivers/clk/imx/clk-imx8dxl-rsrc.c b/drivers/clk/imx/clk-imx8dxl-rsrc.c
new file mode 100644 (file)
index 0000000..69b7aa3
--- /dev/null
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019~2020 NXP
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+#include "clk-scu.h"
+
+/* Keep sorted in the ascending order */
+static u32 imx8dxl_clk_scu_rsrc_table[] = {
+       IMX_SC_R_SPI_0,
+       IMX_SC_R_SPI_1,
+       IMX_SC_R_SPI_2,
+       IMX_SC_R_SPI_3,
+       IMX_SC_R_UART_0,
+       IMX_SC_R_UART_1,
+       IMX_SC_R_UART_2,
+       IMX_SC_R_UART_3,
+       IMX_SC_R_I2C_0,
+       IMX_SC_R_I2C_1,
+       IMX_SC_R_I2C_2,
+       IMX_SC_R_I2C_3,
+       IMX_SC_R_ADC_0,
+       IMX_SC_R_FTM_0,
+       IMX_SC_R_FTM_1,
+       IMX_SC_R_CAN_0,
+       IMX_SC_R_LCD_0,
+       IMX_SC_R_LCD_0_PWM_0,
+       IMX_SC_R_PWM_0,
+       IMX_SC_R_PWM_1,
+       IMX_SC_R_PWM_2,
+       IMX_SC_R_PWM_3,
+       IMX_SC_R_PWM_4,
+       IMX_SC_R_PWM_5,
+       IMX_SC_R_PWM_6,
+       IMX_SC_R_PWM_7,
+       IMX_SC_R_GPT_0,
+       IMX_SC_R_GPT_1,
+       IMX_SC_R_GPT_2,
+       IMX_SC_R_GPT_3,
+       IMX_SC_R_GPT_4,
+       IMX_SC_R_FSPI_0,
+       IMX_SC_R_FSPI_1,
+       IMX_SC_R_SDHC_0,
+       IMX_SC_R_SDHC_1,
+       IMX_SC_R_SDHC_2,
+       IMX_SC_R_ENET_0,
+       IMX_SC_R_ENET_1,
+       IMX_SC_R_MLB_0,
+       IMX_SC_R_USB_1,
+       IMX_SC_R_NAND,
+       IMX_SC_R_M4_0_I2C,
+       IMX_SC_R_M4_0_UART,
+       IMX_SC_R_ELCDIF_PLL,
+       IMX_SC_R_AUDIO_PLL_0,
+       IMX_SC_R_AUDIO_PLL_1,
+       IMX_SC_R_AUDIO_CLK_0,
+       IMX_SC_R_AUDIO_CLK_1,
+       IMX_SC_R_A35
+};
+
+const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8dxl = {
+       .rsrc = imx8dxl_clk_scu_rsrc_table,
+       .num = ARRAY_SIZE(imx8dxl_clk_scu_rsrc_table),
+};
index e92621f..e8cbe18 100644 (file)
@@ -366,45 +366,28 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
        hws[IMX8MM_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
 
        /* SYS PLL1 fixed output */
-       hws[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
-       hws[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
-       hws[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
-       hws[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
-       hws[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
-       hws[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
-       hws[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
-       hws[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
        hws[IMX8MM_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
 
-       hws[IMX8MM_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
-       hws[IMX8MM_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
-       hws[IMX8MM_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
-       hws[IMX8MM_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
-       hws[IMX8MM_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
-       hws[IMX8MM_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
-       hws[IMX8MM_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
-       hws[IMX8MM_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+       hws[IMX8MM_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+       hws[IMX8MM_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+       hws[IMX8MM_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+       hws[IMX8MM_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+       hws[IMX8MM_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+       hws[IMX8MM_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+       hws[IMX8MM_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+       hws[IMX8MM_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
        hws[IMX8MM_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
 
        /* SYS PLL2 fixed output */
-       hws[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
-       hws[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
-       hws[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
-       hws[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
-       hws[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
-       hws[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
-       hws[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
-       hws[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
        hws[IMX8MM_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
-
-       hws[IMX8MM_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
-       hws[IMX8MM_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
-       hws[IMX8MM_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
-       hws[IMX8MM_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
-       hws[IMX8MM_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
-       hws[IMX8MM_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
-       hws[IMX8MM_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
-       hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+       hws[IMX8MM_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+       hws[IMX8MM_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+       hws[IMX8MM_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+       hws[IMX8MM_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+       hws[IMX8MM_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+       hws[IMX8MM_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+       hws[IMX8MM_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+       hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
        hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
 
        hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
index 021355a..92fcbab 100644 (file)
@@ -364,45 +364,27 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
        hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
 
        /* SYS PLL1 fixed output */
-       hws[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
-       hws[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
-       hws[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
-       hws[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
-       hws[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
-       hws[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
-       hws[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
-       hws[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
        hws[IMX8MN_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
-
-       hws[IMX8MN_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
-       hws[IMX8MN_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
-       hws[IMX8MN_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
-       hws[IMX8MN_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
-       hws[IMX8MN_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
-       hws[IMX8MN_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
-       hws[IMX8MN_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
-       hws[IMX8MN_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+       hws[IMX8MN_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+       hws[IMX8MN_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+       hws[IMX8MN_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+       hws[IMX8MN_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+       hws[IMX8MN_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+       hws[IMX8MN_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+       hws[IMX8MN_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+       hws[IMX8MN_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
        hws[IMX8MN_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
 
        /* SYS PLL2 fixed output */
-       hws[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
-       hws[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
-       hws[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
-       hws[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
-       hws[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
-       hws[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
-       hws[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
-       hws[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
        hws[IMX8MN_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
-
-       hws[IMX8MN_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
-       hws[IMX8MN_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
-       hws[IMX8MN_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
-       hws[IMX8MN_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
-       hws[IMX8MN_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
-       hws[IMX8MN_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
-       hws[IMX8MN_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
-       hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+       hws[IMX8MN_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+       hws[IMX8MN_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+       hws[IMX8MN_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+       hws[IMX8MN_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+       hws[IMX8MN_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+       hws[IMX8MN_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+       hws[IMX8MN_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+       hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
        hws[IMX8MN_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
 
        hws[IMX8MN_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
index c990ad3..18f5b7c 100644 (file)
@@ -480,44 +480,28 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
        hws[IMX8MP_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", anatop_base + 0x84, 11);
        hws[IMX8MP_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", anatop_base + 0x114, 11);
 
-       hws[IMX8MP_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1_bypass", anatop_base + 0x94, 27);
-       hws[IMX8MP_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1_bypass", anatop_base + 0x94, 25);
-       hws[IMX8MP_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1_bypass", anatop_base + 0x94, 23);
-       hws[IMX8MP_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1_bypass", anatop_base + 0x94, 21);
-       hws[IMX8MP_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1_bypass", anatop_base + 0x94, 19);
-       hws[IMX8MP_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1_bypass", anatop_base + 0x94, 17);
-       hws[IMX8MP_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1_bypass", anatop_base + 0x94, 15);
-       hws[IMX8MP_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1_bypass", anatop_base + 0x94, 13);
        hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
 
-       hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
-       hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
-       hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
-       hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
-       hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
-       hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
-       hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
-       hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+       hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+       hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+       hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+       hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+       hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+       hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+       hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+       hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
        hws[IMX8MP_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
 
-       hws[IMX8MP_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2_bypass", anatop_base + 0x104, 27);
-       hws[IMX8MP_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2_bypass", anatop_base + 0x104, 25);
-       hws[IMX8MP_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2_bypass", anatop_base + 0x104, 23);
-       hws[IMX8MP_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2_bypass", anatop_base + 0x104, 21);
-       hws[IMX8MP_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2_bypass", anatop_base + 0x104, 19);
-       hws[IMX8MP_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2_bypass", anatop_base + 0x104, 17);
-       hws[IMX8MP_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2_bypass", anatop_base + 0x104, 15);
-       hws[IMX8MP_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2_bypass", anatop_base + 0x104, 13);
        hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
 
-       hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
-       hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
-       hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
-       hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
-       hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
-       hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
-       hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
-       hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+       hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+       hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+       hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+       hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+       hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+       hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+       hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+       hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
        hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
 
        hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000);
@@ -694,6 +678,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
        hws[IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam2_pix_root_clk", "media_cam2_pix", ccm_base + 0x45d0, 0, &share_count_media);
        hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media);
        hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media);
+       hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT] = imx_clk_hw_gate2_shared2("media_mipi_phy1_ref_root", "media_mipi_phy1_ref", ccm_base + 0x45d0, 0, &share_count_media);
        hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp", ccm_base + 0x45d0, 0, &share_count_media);
 
        hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0);
index b237580..5e31a6a 100644 (file)
@@ -248,7 +248,7 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
 
        for (i = 0; i < count; i++) {
                idx = bit_offset[i] / 4;
-               if (idx > IMX_LPCG_MAX_CLKS) {
+               if (idx >= IMX_LPCG_MAX_CLKS) {
                        dev_warn(&pdev->dev, "invalid bit offset of clock %d\n",
                                 i);
                        ret = -EINVAL;
index 40a2efb..546a370 100644 (file)
@@ -295,6 +295,7 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
 
 static const struct of_device_id imx8qxp_match[] = {
        { .compatible = "fsl,scu-clk", },
+       { .compatible = "fsl,imx8dxl-clk", &imx_clk_scu_rsrc_imx8dxl, },
        { .compatible = "fsl,imx8qxp-clk", &imx_clk_scu_rsrc_imx8qxp, },
        { .compatible = "fsl,imx8qm-clk", &imx_clk_scu_rsrc_imx8qm, },
        { /* sentinel */ }
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
new file mode 100644 (file)
index 0000000..edcc876
--- /dev/null
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2021 NXP.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <dt-bindings/clock/imx93-clock.h>
+
+#include "clk.h"
+
+enum clk_sel {
+       LOW_SPEED_IO_SEL,
+       NON_IO_SEL,
+       FAST_SEL,
+       AUDIO_SEL,
+       VIDEO_SEL,
+       TPM_SEL,
+       CKO1_SEL,
+       CKO2_SEL,
+       MISC_SEL,
+       MAX_SEL
+};
+
+static const char *parent_names[MAX_SEL][4] = {
+       {"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "video_pll"},
+       {"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "sys_pll_pfd2_div2"},
+       {"osc_24m", "sys_pll_pfd0", "sys_pll_pfd1", "sys_pll_pfd2"},
+       {"osc_24m", "audio_pll", "video_pll", "clk_ext1"},
+       {"osc_24m", "audio_pll", "video_pll", "sys_pll_pfd0"},
+       {"osc_24m", "sys_pll_pfd0", "audio_pll", "clk_ext1"},
+       {"osc_24m", "sys_pll_pfd0", "sys_pll_pfd1", "audio_pll"},
+       {"osc_24m", "sys_pll_pfd0", "sys_pll_pfd1", "video_pll"},
+       {"osc_24m", "audio_pll", "video_pll", "sys_pll_pfd2"},
+};
+
+static const struct imx93_clk_root {
+       u32 clk;
+       char *name;
+       u32 off;
+       enum clk_sel sel;
+       unsigned long flags;
+} root_array[] = {
+       /* a55/m33/bus critical clk for system run */
+       { IMX93_CLK_A55_PERIPH,         "a55_periph_root",      0x0000, FAST_SEL, CLK_IS_CRITICAL },
+       { IMX93_CLK_A55_MTR_BUS,        "a55_mtr_bus_root",     0x0080, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
+       { IMX93_CLK_A55,                "a55_root",             0x0100, FAST_SEL, CLK_IS_CRITICAL },
+       { IMX93_CLK_M33,                "m33_root",             0x0180, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
+       { IMX93_CLK_BUS_WAKEUP,         "bus_wakeup_root",      0x0280, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
+       { IMX93_CLK_BUS_AON,            "bus_aon_root",         0x0300, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
+       { IMX93_CLK_WAKEUP_AXI,         "wakeup_axi_root",      0x0380, FAST_SEL, CLK_IS_CRITICAL },
+       { IMX93_CLK_SWO_TRACE,          "swo_trace_root",       0x0400, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_M33_SYSTICK,        "m33_systick_root",     0x0480, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_FLEXIO1,            "flexio1_root",         0x0500, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_FLEXIO2,            "flexio2_root",         0x0580, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPIT1,              "lpit1_root",           0x0600, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPIT2,              "lpit2_root",           0x0680, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPTMR1,             "lptmr1_root",          0x0700, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPTMR2,             "lptmr2_root",          0x0780, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_TPM1,               "tpm1_root",            0x0800, TPM_SEL, },
+       { IMX93_CLK_TPM2,               "tpm2_root",            0x0880, TPM_SEL, },
+       { IMX93_CLK_TPM3,               "tpm3_root",            0x0900, TPM_SEL, },
+       { IMX93_CLK_TPM4,               "tpm4_root",            0x0980, TPM_SEL, },
+       { IMX93_CLK_TPM5,               "tpm5_root",            0x0a00, TPM_SEL, },
+       { IMX93_CLK_TPM6,               "tpm6_root",            0x0a80, TPM_SEL, },
+       { IMX93_CLK_FLEXSPI1,           "flexspi1_root",        0x0b00, FAST_SEL, },
+       { IMX93_CLK_CAN1,               "can1_root",            0x0b80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_CAN2,               "can2_root",            0x0c00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART1,            "lpuart1_root",         0x0c80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART2,            "lpuart2_root",         0x0d00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART3,            "lpuart3_root",         0x0d80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART4,            "lpuart4_root",         0x0e00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART5,            "lpuart5_root",         0x0e80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART6,            "lpuart6_root",         0x0f00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART7,            "lpuart7_root",         0x0f80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPUART8,            "lpuart8_root",         0x1000, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C1,             "lpi2c1_root",          0x1080, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C2,             "lpi2c2_root",          0x1100, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C3,             "lpi2c3_root",          0x1180, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C4,             "lpi2c4_root",          0x1200, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C5,             "lpi2c5_root",          0x1280, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C6,             "lpi2c6_root",          0x1300, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C7,             "lpi2c7_root",          0x1380, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPI2C8,             "lpi2c8_root",          0x1400, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI1,             "lpspi1_root",          0x1480, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI2,             "lpspi2_root",          0x1500, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI3,             "lpspi3_root",          0x1580, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI4,             "lpspi4_root",          0x1600, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI5,             "lpspi5_root",          0x1680, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI6,             "lpspi6_root",          0x1700, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI7,             "lpspi7_root",          0x1780, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_LPSPI8,             "lpspi8_root",          0x1800, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_I3C1,               "i3c1_root",            0x1880, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_I3C2,               "i3c2_root",            0x1900, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_USDHC1,             "usdhc1_root",          0x1980, FAST_SEL, },
+       { IMX93_CLK_USDHC2,             "usdhc2_root",          0x1a00, FAST_SEL, },
+       { IMX93_CLK_USDHC3,             "usdhc3_root",          0x1a80, FAST_SEL, },
+       { IMX93_CLK_SAI1,               "sai1_root",            0x1b00, AUDIO_SEL, },
+       { IMX93_CLK_SAI2,               "sai2_root",            0x1b80, AUDIO_SEL, },
+       { IMX93_CLK_SAI3,               "sai3_root",            0x1c00, AUDIO_SEL, },
+       { IMX93_CLK_CCM_CKO1,           "ccm_cko1_root",        0x1c80, CKO1_SEL, },
+       { IMX93_CLK_CCM_CKO2,           "ccm_cko2_root",        0x1d00, CKO2_SEL, },
+       { IMX93_CLK_CCM_CKO3,           "ccm_cko3_root",        0x1d80, CKO1_SEL, },
+       { IMX93_CLK_CCM_CKO4,           "ccm_cko4_root",        0x1e00, CKO2_SEL, },
+       { IMX93_CLK_HSIO,               "hsio_root",            0x1e80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_HSIO_USB_TEST_60M,  "hsio_usb_test_60m_root", 0x1f00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_HSIO_ACSCAN_80M,    "hsio_acscan_80m_root", 0x1f80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_HSIO_ACSCAN_480M,   "hsio_acscan_480m_root", 0x2000, MISC_SEL, },
+       { IMX93_CLK_ML_APB,             "ml_apb_root",          0x2180, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_ML,                 "ml_root",              0x2200, FAST_SEL, },
+       { IMX93_CLK_MEDIA_AXI,          "media_axi_root",       0x2280, FAST_SEL, },
+       { IMX93_CLK_MEDIA_APB,          "media_apb_root",       0x2300, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_MEDIA_LDB,          "media_ldb_root",       0x2380, VIDEO_SEL, },
+       { IMX93_CLK_MEDIA_DISP_PIX,     "media_disp_pix_root",  0x2400, VIDEO_SEL, },
+       { IMX93_CLK_CAM_PIX,            "cam_pix_root",         0x2480, VIDEO_SEL, },
+       { IMX93_CLK_MIPI_TEST_BYTE,     "mipi_test_byte_root",  0x2500, VIDEO_SEL, },
+       { IMX93_CLK_MIPI_PHY_CFG,       "mipi_phy_cfg_root",    0x2580, VIDEO_SEL, },
+       { IMX93_CLK_ADC,                "adc_root",             0x2700, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_PDM,                "pdm_root",             0x2780, AUDIO_SEL, },
+       { IMX93_CLK_TSTMR1,             "tstmr1_root",          0x2800, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_TSTMR2,             "tstmr2_root",          0x2880, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_MQS1,               "mqs1_root",            0x2900, AUDIO_SEL, },
+       { IMX93_CLK_MQS2,               "mqs2_root",            0x2980, AUDIO_SEL, },
+       { IMX93_CLK_AUDIO_XCVR,         "audio_xcvr_root",      0x2a00, NON_IO_SEL, },
+       { IMX93_CLK_SPDIF,              "spdif_root",           0x2a80, AUDIO_SEL, },
+       { IMX93_CLK_ENET,               "enet_root",            0x2b00, NON_IO_SEL, },
+       { IMX93_CLK_ENET_TIMER1,        "enet_timer1_root",     0x2b80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_ENET_TIMER2,        "enet_timer2_root",     0x2c00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_ENET_REF,           "enet_ref_root",        0x2c80, NON_IO_SEL, },
+       { IMX93_CLK_ENET_REF_PHY,       "enet_ref_phy_root",    0x2d00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_I3C1_SLOW,          "i3c1_slow_root",       0x2d80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_I3C2_SLOW,          "i3c2_slow_root",       0x2e00, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_USB_PHY_BURUNIN,    "usb_phy_root",         0x2e80, LOW_SPEED_IO_SEL, },
+       { IMX93_CLK_PAL_CAME_SCAN,      "pal_came_scan_root",   0x2f00, MISC_SEL, }
+};
+
+static const struct imx93_clk_ccgr {
+       u32 clk;
+       char *name;
+       char *parent_name;
+       u32 off;
+       unsigned long flags;
+} ccgr_array[] = {
+       { IMX93_CLK_A55_GATE,           "a55",          "a55_root",             0x8000, },
+       /* M33 critical clk for system run */
+       { IMX93_CLK_CM33_GATE,          "cm33",         "m33_root",             0x8040, CLK_IS_CRITICAL },
+       { IMX93_CLK_ADC1_GATE,          "adc1",         "osc_24m",              0x82c0, },
+       { IMX93_CLK_WDOG1_GATE,         "wdog1",        "osc_24m",              0x8300, },
+       { IMX93_CLK_WDOG2_GATE,         "wdog2",        "osc_24m",              0x8340, },
+       { IMX93_CLK_WDOG3_GATE,         "wdog3",        "osc_24m",              0x8380, },
+       { IMX93_CLK_WDOG4_GATE,         "wdog4",        "osc_24m",              0x83c0, },
+       { IMX93_CLK_WDOG5_GATE,         "wdog5",        "osc_24m",              0x8400, },
+       { IMX93_CLK_SEMA1_GATE,         "sema1",        "bus_aon_root",         0x8440, },
+       { IMX93_CLK_SEMA2_GATE,         "sema2",        "bus_wakeup_root",      0x8480, },
+       { IMX93_CLK_MU_A_GATE,          "mu_a",         "bus_aon_root",         0x84c0, },
+       { IMX93_CLK_MU_B_GATE,          "mu_b",         "bus_aon_root",         0x8500, },
+       { IMX93_CLK_EDMA1_GATE,         "edma1",        "wakeup_axi_root",      0x8540, },
+       { IMX93_CLK_EDMA2_GATE,         "edma2",        "wakeup_axi_root",      0x8580, },
+       { IMX93_CLK_FLEXSPI1_GATE,      "flexspi",      "flexspi_root",         0x8640, },
+       { IMX93_CLK_GPIO1_GATE,         "gpio1",        "m33_root",             0x8880, },
+       { IMX93_CLK_GPIO2_GATE,         "gpio2",        "bus_wakeup_root",      0x88c0, },
+       { IMX93_CLK_GPIO3_GATE,         "gpio3",        "bus_wakeup_root",      0x8900, },
+       { IMX93_CLK_GPIO4_GATE,         "gpio4",        "bus_wakeup_root",      0x8940, },
+       { IMX93_CLK_FLEXIO1_GATE,       "flexio1",      "flexio1_root",         0x8980, },
+       { IMX93_CLK_FLEXIO2_GATE,       "flexio2",      "flexio2_root",         0x89c0, },
+       { IMX93_CLK_LPIT1_GATE,         "lpit1",        "lpit1_root",           0x8a00, },
+       { IMX93_CLK_LPIT2_GATE,         "lpit2",        "lpit2_root",           0x8a40, },
+       { IMX93_CLK_LPTMR1_GATE,        "lptmr1",       "lptmr1_root",          0x8a80, },
+       { IMX93_CLK_LPTMR2_GATE,        "lptmr2",       "lptmr2_root",          0x8ac0, },
+       { IMX93_CLK_TPM1_GATE,          "tpm1",         "tpm1_root",            0x8b00, },
+       { IMX93_CLK_TPM2_GATE,          "tpm2",         "tpm2_root",            0x8b40, },
+       { IMX93_CLK_TPM3_GATE,          "tpm3",         "tpm3_root",            0x8b80, },
+       { IMX93_CLK_TPM4_GATE,          "tpm4",         "tpm4_root",            0x8bc0, },
+       { IMX93_CLK_TPM5_GATE,          "tpm5",         "tpm5_root",            0x8c00, },
+       { IMX93_CLK_TPM6_GATE,          "tpm6",         "tpm6_root",            0x8c40, },
+       { IMX93_CLK_CAN1_GATE,          "can1",         "can1_root",            0x8c80, },
+       { IMX93_CLK_CAN2_GATE,          "can2",         "can2_root",            0x8cc0, },
+       { IMX93_CLK_LPUART1_GATE,       "lpuart1",      "lpuart1_root",         0x8d00, },
+       { IMX93_CLK_LPUART2_GATE,       "lpuart2",      "lpuart2_root",         0x8d40, },
+       { IMX93_CLK_LPUART3_GATE,       "lpuart3",      "lpuart3_root",         0x8d80, },
+       { IMX93_CLK_LPUART4_GATE,       "lpuart4",      "lpuart4_root",         0x8dc0, },
+       { IMX93_CLK_LPUART5_GATE,       "lpuart5",      "lpuart5_root",         0x8e00, },
+       { IMX93_CLK_LPUART6_GATE,       "lpuart6",      "lpuart6_root",         0x8e40, },
+       { IMX93_CLK_LPUART7_GATE,       "lpuart7",      "lpuart7_root",         0x8e80, },
+       { IMX93_CLK_LPUART8_GATE,       "lpuart8",      "lpuart8_root",         0x8ec0, },
+       { IMX93_CLK_LPI2C1_GATE,        "lpi2c1",       "lpi2c1_root",          0x8f00, },
+       { IMX93_CLK_LPI2C2_GATE,        "lpi2c2",       "lpi2c2_root",          0x8f40, },
+       { IMX93_CLK_LPI2C3_GATE,        "lpi2c3",       "lpi2c3_root",          0x8f80, },
+       { IMX93_CLK_LPI2C4_GATE,        "lpi2c4",       "lpi2c4_root",          0x8fc0, },
+       { IMX93_CLK_LPI2C5_GATE,        "lpi2c5",       "lpi2c5_root",          0x9000, },
+       { IMX93_CLK_LPI2C6_GATE,        "lpi2c6",       "lpi2c6_root",          0x9040, },
+       { IMX93_CLK_LPI2C7_GATE,        "lpi2c7",       "lpi2c7_root",          0x9080, },
+       { IMX93_CLK_LPI2C8_GATE,        "lpi2c8",       "lpi2c8_root",          0x90c0, },
+       { IMX93_CLK_LPSPI1_GATE,        "lpspi1",       "lpspi1_root",          0x9100, },
+       { IMX93_CLK_LPSPI2_GATE,        "lpspi2",       "lpspi2_root",          0x9140, },
+       { IMX93_CLK_LPSPI3_GATE,        "lpspi3",       "lpspi3_root",          0x9180, },
+       { IMX93_CLK_LPSPI4_GATE,        "lpspi4",       "lpspi4_root",          0x91c0, },
+       { IMX93_CLK_LPSPI5_GATE,        "lpspi5",       "lpspi5_root",          0x9200, },
+       { IMX93_CLK_LPSPI6_GATE,        "lpspi6",       "lpspi6_root",          0x9240, },
+       { IMX93_CLK_LPSPI7_GATE,        "lpspi7",       "lpspi7_root",          0x9280, },
+       { IMX93_CLK_LPSPI8_GATE,        "lpspi8",       "lpspi8_root",          0x92c0, },
+       { IMX93_CLK_I3C1_GATE,          "i3c1",         "i3c1_root",            0x9300, },
+       { IMX93_CLK_I3C2_GATE,          "i3c2",         "i3c2_root",            0x9340, },
+       { IMX93_CLK_USDHC1_GATE,        "usdhc1",       "usdhc1_root",          0x9380, },
+       { IMX93_CLK_USDHC2_GATE,        "usdhc2",       "usdhc2_root",          0x93c0, },
+       { IMX93_CLK_USDHC3_GATE,        "usdhc3",       "usdhc3_root",          0x9400, },
+       { IMX93_CLK_SAI1_GATE,          "sai1",         "sai1_root",            0x9440, },
+       { IMX93_CLK_SAI2_GATE,          "sai2",         "sai2_root",            0x9480, },
+       { IMX93_CLK_SAI3_GATE,          "sai3",         "sai3_root",            0x94c0, },
+       { IMX93_CLK_MIPI_CSI_GATE,      "mipi_csi",     "media_apb_root",       0x9580, },
+       { IMX93_CLK_MIPI_DSI_GATE,      "mipi_dsi",     "media_apb_root",       0x95c0, },
+       { IMX93_CLK_LVDS_GATE,          "lvds",         "media_ldb_root",       0x9600, },
+       { IMX93_CLK_LCDIF_GATE,         "lcdif",        "media_apb_root",       0x9640, },
+       { IMX93_CLK_PXP_GATE,           "pxp",          "media_apb_root",       0x9680, },
+       { IMX93_CLK_ISI_GATE,           "isi",          "media_apb_root",       0x96c0, },
+       { IMX93_CLK_NIC_MEDIA_GATE,     "nic_media",    "media_apb_root",       0x9700, },
+       { IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root",         0x9a00, },
+       { IMX93_CLK_USB_TEST_60M_GATE,  "usb_test_60m", "hsio_usb_test_60m_root", 0x9a40, },
+       { IMX93_CLK_HSIO_TROUT_24M_GATE, "hsio_trout_24m", "osc_24m",           0x9a80, },
+       { IMX93_CLK_PDM_GATE,           "pdm",          "pdm_root",             0x9ac0, },
+       { IMX93_CLK_MQS1_GATE,          "mqs1",         "sai1_root",            0x9b00, },
+       { IMX93_CLK_MQS2_GATE,          "mqs2",         "sai3_root",            0x9b40, },
+       { IMX93_CLK_AUD_XCVR_GATE,      "aud_xcvr",     "audio_xcvr_root",      0x9b80, },
+       { IMX93_CLK_SPDIF_GATE,         "spdif",        "spdif_root",           0x9c00, },
+       { IMX93_CLK_HSIO_32K_GATE,      "hsio_32k",     "osc_32k",              0x9dc0, },
+       { IMX93_CLK_ENET1_GATE,         "enet1",        "enet_root",            0x9e00, },
+       { IMX93_CLK_ENET_QOS_GATE,      "enet_qos",     "wakeup_axi_root",      0x9e40, },
+       { IMX93_CLK_SYS_CNT_GATE,       "sys_cnt",      "osc_24m",              0x9e80, },
+       { IMX93_CLK_TSTMR1_GATE,        "tstmr1",       "bus_aon_root",         0x9ec0, },
+       { IMX93_CLK_TSTMR2_GATE,        "tstmr2",       "bus_wakeup_root",      0x9f00, },
+       { IMX93_CLK_TMC_GATE,           "tmc",          "osc_24m",              0x9f40, },
+       { IMX93_CLK_PMRO_GATE,          "pmro",         "osc_24m",              0x9f80, }
+};
+
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **clks;
+
+static int imx93_clocks_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       const struct imx93_clk_root *root;
+       const struct imx93_clk_ccgr *ccgr;
+       void __iomem *base = NULL;
+       int i, ret;
+
+       clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+                                         IMX93_CLK_END), GFP_KERNEL);
+       if (!clk_hw_data)
+               return -ENOMEM;
+
+       clk_hw_data->num = IMX93_CLK_END;
+       clks = clk_hw_data->hws;
+
+       clks[IMX93_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+       clks[IMX93_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+       clks[IMX93_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+       clks[IMX93_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+
+       clks[IMX93_CLK_SYS_PLL_PFD0] = imx_clk_hw_fixed("sys_pll_pfd0", 1000000000);
+       clks[IMX93_CLK_SYS_PLL_PFD0_DIV2] = imx_clk_hw_fixed_factor("sys_pll_pfd0_div2",
+                                                                   "sys_pll_pfd0", 1, 2);
+       clks[IMX93_CLK_SYS_PLL_PFD1] = imx_clk_hw_fixed("sys_pll_pfd1", 800000000);
+       clks[IMX93_CLK_SYS_PLL_PFD1_DIV2] = imx_clk_hw_fixed_factor("sys_pll_pfd1_div2",
+                                                                   "sys_pll_pfd1", 1, 2);
+       clks[IMX93_CLK_SYS_PLL_PFD2] = imx_clk_hw_fixed("sys_pll_pfd2", 625000000);
+       clks[IMX93_CLK_SYS_PLL_PFD2_DIV2] = imx_clk_hw_fixed_factor("sys_pll_pfd2_div2",
+                                                                   "sys_pll_pfd2", 1, 2);
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,imx93-anatop");
+       base = of_iomap(np, 0);
+       of_node_put(np);
+       if (WARN_ON(!base))
+               return -ENOMEM;
+
+       clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", base + 0x1200,
+                                                       &imx_fracn_gppll);
+       clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", base + 0x1400,
+                                                       &imx_fracn_gppll);
+
+       np = dev->of_node;
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (WARN_ON(IS_ERR(base)))
+               return PTR_ERR(base);
+
+       for (i = 0; i < ARRAY_SIZE(root_array); i++) {
+               root = &root_array[i];
+               clks[root->clk] = imx93_clk_composite_flags(root->name,
+                                                           parent_names[root->sel],
+                                                           4, base + root->off,
+                                                           root->flags);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) {
+               ccgr = &ccgr_array[i];
+               clks[ccgr->clk] = imx_clk_hw_gate4_flags(ccgr->name,
+                                                        ccgr->parent_name,
+                                                        base + ccgr->off, 0,
+                                                        ccgr->flags);
+       }
+
+       imx_check_clk_hws(clks, IMX93_CLK_END);
+
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+       if (ret < 0) {
+               dev_err(dev, "failed to register clks for i.MX93\n");
+               goto unregister_hws;
+       }
+
+       return 0;
+
+unregister_hws:
+       imx_unregister_hw_clocks(clks, IMX93_CLK_END);
+
+       return ret;
+}
+
+static const struct of_device_id imx93_clk_of_match[] = {
+       { .compatible = "fsl,imx93-ccm" },
+       { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx93_clk_of_match);
+
+static struct platform_driver imx93_clk_driver = {
+       .probe = imx93_clocks_probe,
+       .driver = {
+               .name = "imx93-ccm",
+               .suppress_bind_attrs = true,
+               .of_match_table = of_match_ptr(imx93_clk_of_match),
+       },
+};
+module_platform_driver(imx93_clk_driver);
+
+MODULE_DESCRIPTION("NXP i.MX93 clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
new file mode 100644 (file)
index 0000000..9539d35
--- /dev/null
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2021
+ * Author(s):
+ * Jesse Taube <Mr.Bossman075@gmail.com>
+ * Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/imxrt1050-clock.h>
+
+#include "clk.h"
+
+static const char * const pll_ref_sels[] = {"osc", "dummy", };
+static const char * const per_sels[] = {"ipg_pdof", "osc", };
+static const char * const pll1_bypass_sels[] = {"pll1_arm", "pll1_arm_ref_sel", };
+static const char * const pll2_bypass_sels[] = {"pll2_sys", "pll2_sys_ref_sel", };
+static const char * const pll3_bypass_sels[] = {"pll3_usb_otg", "pll3_usb_otg_ref_sel", };
+static const char * const pll5_bypass_sels[] = {"pll5_video", "pll5_video_ref_sel", };
+static const char *const pre_periph_sels[] = {
+       "pll2_sys", "pll2_pfd2_396m", "pll2_pfd0_352m", "arm_podf", };
+static const char *const periph_sels[] = { "pre_periph_sel", "todo", };
+static const char *const usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *const lpuart_sels[] = { "pll3_80m", "osc", };
+static const char *const lcdif_sels[] = {
+       "pll2_sys", "pll3_pfd3_454_74m", "pll5_video", "pll2_pfd0_352m",
+       "pll2_pfd1_594m", "pll3_pfd1_664_62m", };
+static const char *const semc_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_664_62m", };
+static const char *const semc_sels[] = { "periph_sel", "semc_alt_sel", };
+
+static struct clk_hw **hws;
+static struct clk_hw_onecell_data *clk_hw_data;
+
+static int imxrt1050_clocks_probe(struct platform_device *pdev)
+{
+       void __iomem *ccm_base;
+       void __iomem *pll_base;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *anp;
+       int ret;
+
+       clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+                                         IMXRT1050_CLK_END), GFP_KERNEL);
+       if (WARN_ON(!clk_hw_data))
+               return -ENOMEM;
+
+       clk_hw_data->num = IMXRT1050_CLK_END;
+       hws = clk_hw_data->hws;
+
+       hws[IMXRT1050_CLK_OSC] = imx_obtain_fixed_clk_hw(np, "osc");
+
+       anp = of_find_compatible_node(NULL, NULL, "fsl,imxrt-anatop");
+       pll_base = of_iomap(anp, 0);
+       of_node_put(anp);
+       if (WARN_ON(!pll_base))
+               return -ENOMEM;
+
+       /* Anatop clocks */
+       hws[IMXRT1050_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0UL);
+
+       hws[IMXRT1050_CLK_PLL1_REF_SEL] = imx_clk_hw_mux("pll1_arm_ref_sel",
+               pll_base + 0x0, 14, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMXRT1050_CLK_PLL2_REF_SEL] = imx_clk_hw_mux("pll2_sys_ref_sel",
+               pll_base + 0x30, 14, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMXRT1050_CLK_PLL3_REF_SEL] = imx_clk_hw_mux("pll3_usb_otg_ref_sel",
+               pll_base + 0x10, 14, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+       hws[IMXRT1050_CLK_PLL5_REF_SEL] = imx_clk_hw_mux("pll5_video_ref_sel",
+               pll_base + 0xa0, 14, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+       hws[IMXRT1050_CLK_PLL1_ARM] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1_arm",
+               "pll1_arm_ref_sel", pll_base + 0x0, 0x7f);
+       hws[IMXRT1050_CLK_PLL2_SYS] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2_sys",
+               "pll2_sys_ref_sel", pll_base + 0x30, 0x1);
+       hws[IMXRT1050_CLK_PLL3_USB_OTG] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3_usb_otg",
+               "pll3_usb_otg_ref_sel", pll_base + 0x10, 0x1);
+       hws[IMXRT1050_CLK_PLL5_VIDEO] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5_video",
+               "pll5_video_ref_sel", pll_base + 0xa0, 0x7f);
+
+       /* PLL bypass out */
+       hws[IMXRT1050_CLK_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", pll_base + 0x0, 16, 1,
+               pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMXRT1050_CLK_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", pll_base + 0x30, 16, 1,
+               pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMXRT1050_CLK_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", pll_base + 0x10, 16, 1,
+               pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+       hws[IMXRT1050_CLK_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", pll_base + 0xa0, 16, 1,
+               pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+
+       hws[IMXRT1050_CLK_VIDEO_POST_DIV_SEL] = imx_clk_hw_divider("video_post_div_sel",
+               "pll5_video", pll_base + 0xa0, 19, 2);
+       hws[IMXRT1050_CLK_VIDEO_DIV] = imx_clk_hw_divider("video_div",
+               "video_post_div_sel", pll_base + 0x170, 30, 2);
+
+       hws[IMXRT1050_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m",  "pll3_usb_otg", 1, 6);
+
+       hws[IMXRT1050_CLK_PLL2_PFD0_352M] = imx_clk_hw_pfd("pll2_pfd0_352m", "pll2_sys", pll_base + 0x100, 0);
+       hws[IMXRT1050_CLK_PLL2_PFD1_594M] = imx_clk_hw_pfd("pll2_pfd1_594m", "pll2_sys", pll_base + 0x100, 1);
+       hws[IMXRT1050_CLK_PLL2_PFD2_396M] = imx_clk_hw_pfd("pll2_pfd2_396m", "pll2_sys", pll_base + 0x100, 2);
+       hws[IMXRT1050_CLK_PLL3_PFD1_664_62M] = imx_clk_hw_pfd("pll3_pfd1_664_62m", "pll3_usb_otg", pll_base + 0xf0, 1);
+       hws[IMXRT1050_CLK_PLL3_PFD3_454_74M] = imx_clk_hw_pfd("pll3_pfd3_454_74m", "pll3_usb_otg", pll_base + 0xf0, 3);
+
+       /* CCM clocks */
+       ccm_base = devm_platform_ioremap_resource(pdev, 0);
+       if (WARN_ON(IS_ERR(ccm_base)))
+               return PTR_ERR(ccm_base);
+
+       hws[IMXRT1050_CLK_ARM_PODF] = imx_clk_hw_divider("arm_podf", "pll1_arm", ccm_base + 0x10, 0, 3);
+       hws[IMXRT1050_CLK_PRE_PERIPH_SEL] = imx_clk_hw_mux("pre_periph_sel", ccm_base + 0x18, 18, 2,
+               pre_periph_sels, ARRAY_SIZE(pre_periph_sels));
+       hws[IMXRT1050_CLK_PERIPH_SEL] = imx_clk_hw_mux("periph_sel", ccm_base + 0x14, 25, 1,
+               periph_sels, ARRAY_SIZE(periph_sels));
+       hws[IMXRT1050_CLK_USDHC1_SEL] = imx_clk_hw_mux("usdhc1_sel", ccm_base + 0x1c, 16, 1,
+               usdhc_sels, ARRAY_SIZE(usdhc_sels));
+       hws[IMXRT1050_CLK_USDHC2_SEL] = imx_clk_hw_mux("usdhc2_sel", ccm_base + 0x1c, 17, 1,
+               usdhc_sels, ARRAY_SIZE(usdhc_sels));
+       hws[IMXRT1050_CLK_LPUART_SEL] = imx_clk_hw_mux("lpuart_sel", ccm_base + 0x24, 6, 1,
+               lpuart_sels, ARRAY_SIZE(lpuart_sels));
+       hws[IMXRT1050_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", ccm_base + 0x38, 15, 3,
+               lcdif_sels, ARRAY_SIZE(lcdif_sels));
+       hws[IMXRT1050_CLK_PER_CLK_SEL] = imx_clk_hw_mux("per_sel", ccm_base + 0x1C, 6, 1,
+               per_sels, ARRAY_SIZE(per_sels));
+       hws[IMXRT1050_CLK_SEMC_ALT_SEL] = imx_clk_hw_mux("semc_alt_sel", ccm_base + 0x14, 7, 1,
+               semc_alt_sels, ARRAY_SIZE(semc_alt_sels));
+       hws[IMXRT1050_CLK_SEMC_SEL] = imx_clk_hw_mux_flags("semc_sel", ccm_base + 0x14, 6, 1,
+               semc_sels, ARRAY_SIZE(semc_sels), CLK_IS_CRITICAL);
+
+       hws[IMXRT1050_CLK_AHB_PODF] = imx_clk_hw_divider("ahb", "periph_sel", ccm_base + 0x14, 10, 3);
+       hws[IMXRT1050_CLK_IPG_PDOF] = imx_clk_hw_divider("ipg", "ahb", ccm_base + 0x14, 8, 2);
+       hws[IMXRT1050_CLK_PER_PDOF] = imx_clk_hw_divider("per", "per_sel", ccm_base + 0x1C, 0, 5);
+
+       hws[IMXRT1050_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", ccm_base + 0x24, 11, 3);
+       hws[IMXRT1050_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", ccm_base + 0x24, 16, 3);
+       hws[IMXRT1050_CLK_LPUART_PODF] = imx_clk_hw_divider("lpuart_podf", "lpuart_sel", ccm_base + 0x24, 0, 6);
+       hws[IMXRT1050_CLK_LCDIF_PRED] = imx_clk_hw_divider("lcdif_pred", "lcdif_sel", ccm_base + 0x38, 12, 3);
+       hws[IMXRT1050_CLK_LCDIF_PODF] = imx_clk_hw_divider("lcdif_podf", "lcdif_pred", ccm_base + 0x18, 23, 3);
+
+       hws[IMXRT1050_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", ccm_base + 0x80, 2);
+       hws[IMXRT1050_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", ccm_base + 0x80, 4);
+       hws[IMXRT1050_CLK_LPUART1] = imx_clk_hw_gate2("lpuart1", "lpuart_podf", ccm_base + 0x7c, 24);
+       hws[IMXRT1050_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif", "lcdif_podf", ccm_base + 0x74, 10);
+       hws[IMXRT1050_CLK_DMA] = imx_clk_hw_gate("dma", "ipg", ccm_base + 0x7C, 6);
+       hws[IMXRT1050_CLK_DMA_MUX] = imx_clk_hw_gate("dmamux0", "ipg", ccm_base + 0x7C, 7);
+       imx_check_clk_hws(hws, IMXRT1050_CLK_END);
+
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+       if (ret < 0) {
+               dev_err(dev, "Failed to register clks for i.MXRT1050.\n");
+               imx_unregister_hw_clocks(hws, IMXRT1050_CLK_END);
+       }
+       return ret;
+}
+static const struct of_device_id imxrt1050_clk_of_match[] = {
+       { .compatible = "fsl,imxrt1050-ccm" },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imxrt1050_clk_of_match);
+
+static struct platform_driver imxrt1050_clk_driver = {
+       .probe = imxrt1050_clocks_probe,
+       .driver = {
+               .name = "imxrt1050-ccm",
+               .of_match_table = imxrt1050_clk_of_match,
+       },
+};
+module_platform_driver(imxrt1050_clk_driver);
index 2b5ed86..1d0f79e 100644 (file)
@@ -3,6 +3,9 @@
  * Copyright 2017-2018 NXP.
  */
 
+#define pr_fmt(fmt) "pll14xx: " fmt
+
+#include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include "clk.h"
 
 #define GNRL_CTL       0x0
-#define DIV_CTL                0x4
+#define DIV_CTL0       0x4
+#define DIV_CTL1       0x8
 #define LOCK_STATUS    BIT(31)
 #define LOCK_SEL_MASK  BIT(29)
 #define CLKE_MASK      BIT(11)
 #define RST_MASK       BIT(9)
 #define BYPASS_MASK    BIT(4)
-#define MDIV_SHIFT     12
 #define MDIV_MASK      GENMASK(21, 12)
-#define PDIV_SHIFT     4
 #define PDIV_MASK      GENMASK(9, 4)
-#define SDIV_SHIFT     0
 #define SDIV_MASK      GENMASK(2, 0)
-#define KDIV_SHIFT     0
 #define KDIV_MASK      GENMASK(15, 0)
+#define KDIV_MIN       SHRT_MIN
+#define KDIV_MAX       SHRT_MAX
 
 #define LOCK_TIMEOUT_US                10000
 
@@ -99,62 +101,165 @@ static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
        return NULL;
 }
 
-static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
+static long pll14xx_calc_rate(struct clk_pll14xx *pll, int mdiv, int pdiv,
+                             int sdiv, int kdiv, unsigned long prate)
+{
+       u64 fvco = prate;
+
+       /* fvco = (m * 65536 + k) * Fin / (p * 65536) */
+       fvco *= (mdiv * 65536 + kdiv);
+       pdiv *= 65536;
+
+       do_div(fvco, pdiv << sdiv);
+
+       return fvco;
+}
+
+static long pll1443x_calc_kdiv(int mdiv, int pdiv, int sdiv,
+               unsigned long rate, unsigned long prate)
+{
+       long kdiv;
+
+       /* calc kdiv = round(rate * pdiv * 65536 * 2^sdiv / prate) - (mdiv * 65536) */
+       kdiv = ((rate * ((pdiv * 65536) << sdiv) + prate / 2) / prate) - (mdiv * 65536);
+
+       return clamp_t(short, kdiv, KDIV_MIN, KDIV_MAX);
+}
+
+static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rate,
+                                     unsigned long prate, struct imx_pll14xx_rate_table *t)
+{
+       u32 pll_div_ctl0, pll_div_ctl1;
+       int mdiv, pdiv, sdiv, kdiv;
+       long fvco, rate_min, rate_max, dist, best = LONG_MAX;
+       const struct imx_pll14xx_rate_table *tt;
+
+       /*
+        * Fractional PLL constrains:
+        *
+        * a) 6MHz <= prate <= 25MHz
+        * b) 1 <= p <= 63 (1 <= p <= 4 prate = 24MHz)
+        * c) 64 <= m <= 1023
+        * d) 0 <= s <= 6
+        * e) -32768 <= k <= 32767
+        *
+        * fvco = (m * 65536 + k) * prate / (p * 65536)
+        */
+
+       /* First try if we can get the desired rate from one of the static entries */
+       tt = imx_get_pll_settings(pll, rate);
+       if (tt) {
+               pr_debug("%s: in=%ld, want=%ld, Using PLL setting from table\n",
+                        clk_hw_get_name(&pll->hw), prate, rate);
+               t->rate = tt->rate;
+               t->mdiv = tt->mdiv;
+               t->pdiv = tt->pdiv;
+               t->sdiv = tt->sdiv;
+               t->kdiv = tt->kdiv;
+               return;
+       }
+
+       pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
+       mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
+       pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
+       sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
+       pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
+
+       /* Then see if we can get the desired rate by only adjusting kdiv (glitch free) */
+       rate_min = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MIN, prate);
+       rate_max = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MAX, prate);
+
+       if (rate >= rate_min && rate <= rate_max) {
+               kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
+               pr_debug("%s: in=%ld, want=%ld Only adjust kdiv %ld -> %d\n",
+                        clk_hw_get_name(&pll->hw), prate, rate,
+                        FIELD_GET(KDIV_MASK, pll_div_ctl1), kdiv);
+               fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
+               t->rate = (unsigned int)fvco;
+               t->mdiv = mdiv;
+               t->pdiv = pdiv;
+               t->sdiv = sdiv;
+               t->kdiv = kdiv;
+               return;
+       }
+
+       /* Finally calculate best values */
+       for (pdiv = 1; pdiv <= 7; pdiv++) {
+               for (sdiv = 0; sdiv <= 6; sdiv++) {
+                       /* calc mdiv = round(rate * pdiv * 2^sdiv) / prate) */
+                       mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
+                       mdiv = clamp(mdiv, 64, 1023);
+
+                       kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
+                       fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
+
+                       /* best match */
+                       dist = abs((long)rate - (long)fvco);
+                       if (dist < best) {
+                               best = dist;
+                               t->rate = (unsigned int)fvco;
+                               t->mdiv = mdiv;
+                               t->pdiv = pdiv;
+                               t->sdiv = sdiv;
+                               t->kdiv = kdiv;
+
+                               if (!dist)
+                                       goto found;
+                       }
+               }
+       }
+found:
+       pr_debug("%s: in=%ld, want=%ld got=%d (pdiv=%d sdiv=%d mdiv=%d kdiv=%d)\n",
+                clk_hw_get_name(&pll->hw), prate, rate, t->rate, t->pdiv, t->sdiv,
+                t->mdiv, t->kdiv);
+}
+
+static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *prate)
 {
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
        int i;
 
-       /* Assumming rate_table is in descending order */
+       /* Assuming rate_table is in descending order */
        for (i = 0; i < pll->rate_count; i++)
                if (rate >= rate_table[i].rate)
                        return rate_table[i].rate;
 
        /* return minimum supported value */
-       return rate_table[i - 1].rate;
+       return rate_table[pll->rate_count - 1].rate;
 }
 
-static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
-                                                 unsigned long parent_rate)
+static long clk_pll1443x_round_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long *prate)
 {
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
-       u32 mdiv, pdiv, sdiv, pll_div;
-       u64 fvco = parent_rate;
+       struct imx_pll14xx_rate_table t;
 
-       pll_div = readl_relaxed(pll->base + 4);
-       mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
-       pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
-       sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;
+       imx_pll14xx_calc_settings(pll, rate, *prate, &t);
 
-       fvco *= mdiv;
-       do_div(fvco, pdiv << sdiv);
-
-       return fvco;
+       return t.rate;
 }
 
-static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
+static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
 {
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
-       u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
-       short int kdiv;
-       u64 fvco = parent_rate;
-
-       pll_div_ctl0 = readl_relaxed(pll->base + 4);
-       pll_div_ctl1 = readl_relaxed(pll->base + 8);
-       mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
-       pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
-       sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
-       kdiv = pll_div_ctl1 & KDIV_MASK;
-
-       /* fvco = (m * 65536 + k) * Fin / (p * 65536) */
-       fvco *= (mdiv * 65536 + kdiv);
-       pdiv *= 65536;
-
-       do_div(fvco, pdiv << sdiv);
+       u32 mdiv, pdiv, sdiv, kdiv, pll_div_ctl0, pll_div_ctl1;
+
+       pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
+       mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
+       pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
+       sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
+
+       if (pll->type == PLL_1443X) {
+               pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
+               kdiv = FIELD_GET(KDIV_MASK, pll_div_ctl1);
+       } else {
+               kdiv = 0;
+       }
 
-       return fvco;
+       return pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, parent_rate);
 }
 
 static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
@@ -162,8 +267,8 @@ static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *ra
 {
        u32 old_mdiv, old_pdiv;
 
-       old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
-       old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
+       old_mdiv = FIELD_GET(MDIV_MASK, pll_div);
+       old_pdiv = FIELD_GET(PDIV_MASK, pll_div);
 
        return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
 }
@@ -172,7 +277,7 @@ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
 {
        u32 val;
 
-       return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
+       return readl_poll_timeout(pll->base + GNRL_CTL, val, val & LOCK_STATUS, 0,
                        LOCK_TIMEOUT_US);
 }
 
@@ -186,37 +291,37 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
 
        rate = imx_get_pll_settings(pll, drate);
        if (!rate) {
-               pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-                      drate, clk_hw_get_name(hw));
+               pr_err("Invalid rate %lu for pll clk %s\n", drate,
+                      clk_hw_get_name(hw));
                return -EINVAL;
        }
 
-       tmp = readl_relaxed(pll->base + 4);
+       tmp = readl_relaxed(pll->base + DIV_CTL0);
 
        if (!clk_pll14xx_mp_change(rate, tmp)) {
-               tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
-               tmp |= rate->sdiv << SDIV_SHIFT;
-               writel_relaxed(tmp, pll->base + 4);
+               tmp &= ~SDIV_MASK;
+               tmp |= FIELD_PREP(SDIV_MASK, rate->sdiv);
+               writel_relaxed(tmp, pll->base + DIV_CTL0);
 
                return 0;
        }
 
        /* Bypass clock and set lock to pll output lock */
-       tmp = readl_relaxed(pll->base);
+       tmp = readl_relaxed(pll->base + GNRL_CTL);
        tmp |= LOCK_SEL_MASK;
-       writel_relaxed(tmp, pll->base);
+       writel_relaxed(tmp, pll->base + GNRL_CTL);
 
        /* Enable RST */
        tmp &= ~RST_MASK;
-       writel_relaxed(tmp, pll->base);
+       writel_relaxed(tmp, pll->base + GNRL_CTL);
 
        /* Enable BYPASS */
        tmp |= BYPASS_MASK;
-       writel(tmp, pll->base);
+       writel(tmp, pll->base + GNRL_CTL);
 
-       div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
-               (rate->sdiv << SDIV_SHIFT);
-       writel_relaxed(div_val, pll->base + 0x4);
+       div_val = FIELD_PREP(MDIV_MASK, rate->mdiv) | FIELD_PREP(PDIV_MASK, rate->pdiv) |
+               FIELD_PREP(SDIV_MASK, rate->sdiv);
+       writel_relaxed(div_val, pll->base + DIV_CTL0);
 
        /*
         * According to SPEC, t3 - t2 need to be greater than
@@ -228,7 +333,7 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
 
        /* Disable RST */
        tmp |= RST_MASK;
-       writel_relaxed(tmp, pll->base);
+       writel_relaxed(tmp, pll->base + GNRL_CTL);
 
        /* Wait Lock */
        ret = clk_pll14xx_wait_lock(pll);
@@ -237,7 +342,7 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
 
        /* Bypass */
        tmp &= ~BYPASS_MASK;
-       writel_relaxed(tmp, pll->base);
+       writel_relaxed(tmp, pll->base + GNRL_CTL);
 
        return 0;
 }
@@ -246,43 +351,41 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
                                 unsigned long prate)
 {
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
-       const struct imx_pll14xx_rate_table *rate;
-       u32 tmp, div_val;
+       struct imx_pll14xx_rate_table rate;
+       u32 gnrl_ctl, div_ctl0;
        int ret;
 
-       rate = imx_get_pll_settings(pll, drate);
-       if (!rate) {
-               pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-                       drate, clk_hw_get_name(hw));
-               return -EINVAL;
-       }
+       imx_pll14xx_calc_settings(pll, drate, prate, &rate);
 
-       tmp = readl_relaxed(pll->base + 4);
+       div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
 
-       if (!clk_pll14xx_mp_change(rate, tmp)) {
-               tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
-               tmp |= rate->sdiv << SDIV_SHIFT;
-               writel_relaxed(tmp, pll->base + 4);
+       if (!clk_pll14xx_mp_change(&rate, div_ctl0)) {
+               /* only sdiv and/or kdiv changed - no need to RESET PLL */
+               div_ctl0 &= ~SDIV_MASK;
+               div_ctl0 |= FIELD_PREP(SDIV_MASK, rate.sdiv);
+               writel_relaxed(div_ctl0, pll->base + DIV_CTL0);
 
-               tmp = rate->kdiv << KDIV_SHIFT;
-               writel_relaxed(tmp, pll->base + 8);
+               writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv),
+                              pll->base + DIV_CTL1);
 
                return 0;
        }
 
        /* Enable RST */
-       tmp = readl_relaxed(pll->base);
-       tmp &= ~RST_MASK;
-       writel_relaxed(tmp, pll->base);
+       gnrl_ctl = readl_relaxed(pll->base + GNRL_CTL);
+       gnrl_ctl &= ~RST_MASK;
+       writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
 
        /* Enable BYPASS */
-       tmp |= BYPASS_MASK;
-       writel_relaxed(tmp, pll->base);
+       gnrl_ctl |= BYPASS_MASK;
+       writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
 
-       div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
-               (rate->sdiv << SDIV_SHIFT);
-       writel_relaxed(div_val, pll->base + 0x4);
-       writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8);
+       div_ctl0 = FIELD_PREP(MDIV_MASK, rate.mdiv) |
+                  FIELD_PREP(PDIV_MASK, rate.pdiv) |
+                  FIELD_PREP(SDIV_MASK, rate.sdiv);
+       writel_relaxed(div_ctl0, pll->base + DIV_CTL0);
+
+       writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv), pll->base + DIV_CTL1);
 
        /*
         * According to SPEC, t3 - t2 need to be greater than
@@ -293,8 +396,8 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
        udelay(3);
 
        /* Disable RST */
-       tmp |= RST_MASK;
-       writel_relaxed(tmp, pll->base);
+       gnrl_ctl |= RST_MASK;
+       writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
 
        /* Wait Lock*/
        ret = clk_pll14xx_wait_lock(pll);
@@ -302,8 +405,8 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
                return ret;
 
        /* Bypass */
-       tmp &= ~BYPASS_MASK;
-       writel_relaxed(tmp, pll->base);
+       gnrl_ctl &= ~BYPASS_MASK;
+       writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
 
        return 0;
 }
@@ -364,21 +467,21 @@ static const struct clk_ops clk_pll1416x_ops = {
        .prepare        = clk_pll14xx_prepare,
        .unprepare      = clk_pll14xx_unprepare,
        .is_prepared    = clk_pll14xx_is_prepared,
-       .recalc_rate    = clk_pll1416x_recalc_rate,
-       .round_rate     = clk_pll14xx_round_rate,
+       .recalc_rate    = clk_pll14xx_recalc_rate,
+       .round_rate     = clk_pll1416x_round_rate,
        .set_rate       = clk_pll1416x_set_rate,
 };
 
 static const struct clk_ops clk_pll1416x_min_ops = {
-       .recalc_rate    = clk_pll1416x_recalc_rate,
+       .recalc_rate    = clk_pll14xx_recalc_rate,
 };
 
 static const struct clk_ops clk_pll1443x_ops = {
        .prepare        = clk_pll14xx_prepare,
        .unprepare      = clk_pll14xx_unprepare,
        .is_prepared    = clk_pll14xx_is_prepared,
-       .recalc_rate    = clk_pll1443x_recalc_rate,
-       .round_rate     = clk_pll14xx_round_rate,
+       .recalc_rate    = clk_pll14xx_recalc_rate,
+       .round_rate     = clk_pll1443x_round_rate,
        .set_rate       = clk_pll1443x_set_rate,
 };
 
@@ -412,8 +515,7 @@ struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
                init.ops = &clk_pll1443x_ops;
                break;
        default:
-               pr_err("%s: Unknown pll type for pll clk %s\n",
-                      __func__, name);
+               pr_err("Unknown pll type for pll clk %s\n", name);
                kfree(pll);
                return ERR_PTR(-EINVAL);
        }
@@ -432,8 +534,7 @@ struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
 
        ret = clk_hw_register(dev, hw);
        if (ret) {
-               pr_err("%s: failed to register pll %s %d\n",
-                       __func__, name, ret);
+               pr_err("failed to register pll %s %d\n", name, ret);
                kfree(pll);
                return ERR_PTR(ret);
        }
index 22156e9..af7b697 100644 (file)
@@ -21,6 +21,7 @@ struct imx_clk_scu_rsrc_table {
 
 extern struct list_head imx_scu_clks[];
 extern const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops;
+extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8dxl;
 extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8qxp;
 extern const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8qm;
 
index 9d6cdff..81f304f 100644 (file)
@@ -525,7 +525,6 @@ struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
        init.parent_names = parent_names;
        init.num_parents = num_parents;
 
-       pll->base = base;
        pll->hw.init = &init;
 
        hw = &pll->hw;
index 7d220a0..a7cbbcd 100644 (file)
@@ -72,6 +72,27 @@ extern struct imx_pll14xx_clk imx_1416x_pll;
 extern struct imx_pll14xx_clk imx_1443x_pll;
 extern struct imx_pll14xx_clk imx_1443x_dram_pll;
 
+/* NOTE: Rate table should be kept sorted in descending order. */
+struct imx_fracn_gppll_rate_table {
+       unsigned int rate;
+       unsigned int mfi;
+       unsigned int mfn;
+       unsigned int mfd;
+       unsigned int rdiv;
+       unsigned int odiv;
+};
+
+struct imx_fracn_gppll_clk {
+       const struct imx_fracn_gppll_rate_table *rate_table;
+       int rate_count;
+       int flags;
+};
+
+struct clk_hw *imx_clk_fracn_gppll(const char *name, const char *parent_name, void __iomem *base,
+                                  const struct imx_fracn_gppll_clk *pll_clk);
+
+extern struct imx_fracn_gppll_clk imx_fracn_gppll;
+
 #define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
        to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
 
@@ -419,6 +440,15 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
                        IMX_COMPOSITE_FW_MANAGED, \
                        IMX_COMPOSITE_CLK_FLAGS_CRITICAL_GET_RATE_NO_CACHE)
 
+struct clk_hw *imx93_clk_composite_flags(const char *name,
+                                        const char * const *parent_names,
+                                        int num_parents,
+                                        void __iomem *reg,
+                                        unsigned long flags);
+#define imx93_clk_composite(name, parent_names, num_parents, reg) \
+       imx93_clk_composite_flags(name, parent_names, num_parents, reg, \
+                                 CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+
 struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
                unsigned long flags, void __iomem *reg, u8 shift, u8 width,
                u8 clk_divider_flags, const struct clk_div_table *table,
index 703f876..1ebf740 100644 (file)
@@ -37,6 +37,7 @@ static const struct clk_div_table ahb_div_table[] = {
        [1] = { .val = 1, .div = 4 },
        [2] = { .val = 2, .div = 3 },
        [3] = { .val = 3, .div = 3 },
+       [4] = { /* sentinel */ }
 };
 
 void __init ls1x_clk_init(void)
index caa9119..a29339c 100644 (file)
@@ -92,7 +92,7 @@ struct clk * __init mtk_clk_register_ref2usb_tx(const char *name,
        clk = clk_register(NULL, &tx->hw);
 
        if (IS_ERR(clk)) {
-               pr_err("Failed to register clk %s: %ld\n", name, PTR_ERR(clk));
+               pr_err("Failed to register clk %s: %pe\n", name, clk);
                kfree(tx);
        }
 
index e188018..c11b3fa 100644 (file)
@@ -5,13 +5,24 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
 #include "clk-mtk.h"
 #include "clk-cpumux.h"
 
+struct mtk_clk_cpumux {
+       struct clk_hw   hw;
+       struct regmap   *regmap;
+       u32             reg;
+       u32             mask;
+       u8              shift;
+};
+
 static inline struct mtk_clk_cpumux *to_mtk_clk_cpumux(struct clk_hw *_hw)
 {
        return container_of(_hw, struct mtk_clk_cpumux, hw);
@@ -77,6 +88,21 @@ mtk_clk_register_cpumux(const struct mtk_composite *mux,
        return clk;
 }
 
+static void mtk_clk_unregister_cpumux(struct clk *clk)
+{
+       struct mtk_clk_cpumux *cpumux;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       cpumux = to_mtk_clk_cpumux(hw);
+
+       clk_unregister(clk);
+       kfree(cpumux);
+}
+
 int mtk_clk_register_cpumuxes(struct device_node *node,
                              const struct mtk_composite *clks, int num,
                              struct clk_onecell_data *clk_data)
@@ -87,25 +113,58 @@ int mtk_clk_register_cpumuxes(struct device_node *node,
 
        regmap = device_node_to_regmap(node);
        if (IS_ERR(regmap)) {
-               pr_err("Cannot find regmap for %pOF: %ld\n", node,
-                      PTR_ERR(regmap));
+               pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
                return PTR_ERR(regmap);
        }
 
        for (i = 0; i < num; i++) {
                const struct mtk_composite *mux = &clks[i];
 
+               if (!IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
+                       pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
+                               node, mux->id);
+                       continue;
+               }
+
                clk = mtk_clk_register_cpumux(mux, regmap);
                if (IS_ERR(clk)) {
-                       pr_err("Failed to register clk %s: %ld\n",
-                              mux->name, PTR_ERR(clk));
-                       continue;
+                       pr_err("Failed to register clk %s: %pe\n", mux->name, clk);
+                       goto err;
                }
 
                clk_data->clks[mux->id] = clk;
        }
 
        return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_composite *mux = &clks[i];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+                       continue;
+
+               mtk_clk_unregister_cpumux(clk_data->clks[mux->id]);
+               clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+       }
+
+       return PTR_ERR(clk);
+}
+
+void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
+                                struct clk_onecell_data *clk_data)
+{
+       int i;
+
+       for (i = num; i > 0; i--) {
+               const struct mtk_composite *mux = &clks[i - 1];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+                       continue;
+
+               mtk_clk_unregister_cpumux(clk_data->clks[mux->id]);
+               clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+       }
 }
 
 MODULE_LICENSE("GPL");
index 2aaf1af..b07e89f 100644 (file)
@@ -7,16 +7,15 @@
 #ifndef __DRV_CLK_CPUMUX_H
 #define __DRV_CLK_CPUMUX_H
 
-struct mtk_clk_cpumux {
-       struct clk_hw   hw;
-       struct regmap   *regmap;
-       u32             reg;
-       u32             mask;
-       u8              shift;
-};
+struct clk_onecell_data;
+struct device_node;
+struct mtk_composite;
 
 int mtk_clk_register_cpumuxes(struct device_node *node,
                              const struct mtk_composite *clks, int num,
                              struct clk_onecell_data *clk_data);
 
+void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
+                                struct clk_onecell_data *clk_data);
+
 #endif /* __DRV_CLK_CPUMUX_H */
index 5d88b42..da52023 100644 (file)
@@ -4,18 +4,30 @@
  * Author: James Liao <jamesjj.liao@mediatek.com>
  */
 
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
 
-#include "clk-mtk.h"
 #include "clk-gate.h"
 
+struct mtk_clk_gate {
+       struct clk_hw   hw;
+       struct regmap   *regmap;
+       int             set_ofs;
+       int             clr_ofs;
+       int             sta_ofs;
+       u8              bit;
+};
+
+static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
+{
+       return container_of(hw, struct mtk_clk_gate, hw);
+}
+
 static u32 mtk_get_clockgating(struct clk_hw *hw)
 {
        struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
@@ -140,17 +152,12 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
 };
 EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
 
-struct clk *mtk_clk_register_gate(
-               const char *name,
-               const char *parent_name,
-               struct regmap *regmap,
-               int set_ofs,
-               int clr_ofs,
-               int sta_ofs,
-               u8 bit,
-               const struct clk_ops *ops,
-               unsigned long flags,
-               struct device *dev)
+static struct clk *mtk_clk_register_gate(const char *name,
+                                        const char *parent_name,
+                                        struct regmap *regmap, int set_ofs,
+                                        int clr_ofs, int sta_ofs, u8 bit,
+                                        const struct clk_ops *ops,
+                                        unsigned long flags, struct device *dev)
 {
        struct mtk_clk_gate *cg;
        struct clk *clk;
@@ -180,6 +187,107 @@ struct clk *mtk_clk_register_gate(
 
        return clk;
 }
-EXPORT_SYMBOL_GPL(mtk_clk_register_gate);
+
+static void mtk_clk_unregister_gate(struct clk *clk)
+{
+       struct mtk_clk_gate *cg;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       cg = to_mtk_clk_gate(hw);
+
+       clk_unregister(clk);
+       kfree(cg);
+}
+
+int mtk_clk_register_gates_with_dev(struct device_node *node,
+                                   const struct mtk_gate *clks, int num,
+                                   struct clk_onecell_data *clk_data,
+                                   struct device *dev)
+{
+       int i;
+       struct clk *clk;
+       struct regmap *regmap;
+
+       if (!clk_data)
+               return -ENOMEM;
+
+       regmap = device_node_to_regmap(node);
+       if (IS_ERR(regmap)) {
+               pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
+               return PTR_ERR(regmap);
+       }
+
+       for (i = 0; i < num; i++) {
+               const struct mtk_gate *gate = &clks[i];
+
+               if (!IS_ERR_OR_NULL(clk_data->clks[gate->id])) {
+                       pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
+                               node, gate->id);
+                       continue;
+               }
+
+               clk = mtk_clk_register_gate(gate->name, gate->parent_name,
+                                           regmap,
+                                           gate->regs->set_ofs,
+                                           gate->regs->clr_ofs,
+                                           gate->regs->sta_ofs,
+                                           gate->shift, gate->ops,
+                                           gate->flags, dev);
+
+               if (IS_ERR(clk)) {
+                       pr_err("Failed to register clk %s: %pe\n", gate->name, clk);
+                       goto err;
+               }
+
+               clk_data->clks[gate->id] = clk;
+       }
+
+       return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_gate *gate = &clks[i];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[gate->id]))
+                       continue;
+
+               mtk_clk_unregister_gate(clk_data->clks[gate->id]);
+               clk_data->clks[gate->id] = ERR_PTR(-ENOENT);
+       }
+
+       return PTR_ERR(clk);
+}
+
+int mtk_clk_register_gates(struct device_node *node,
+                          const struct mtk_gate *clks, int num,
+                          struct clk_onecell_data *clk_data)
+{
+       return mtk_clk_register_gates_with_dev(node, clks, num, clk_data, NULL);
+}
+EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
+
+void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
+                             struct clk_onecell_data *clk_data)
+{
+       int i;
+
+       if (!clk_data)
+               return;
+
+       for (i = num; i > 0; i--) {
+               const struct mtk_gate *gate = &clks[i - 1];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[gate->id]))
+                       continue;
+
+               mtk_clk_unregister_gate(clk_data->clks[gate->id]);
+               clk_data->clks[gate->id] = ERR_PTR(-ENOENT);
+       }
+}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_gates);
 
 MODULE_LICENSE("GPL");
index 3c3329e..6b57388 100644 (file)
@@ -7,41 +7,34 @@
 #ifndef __DRV_CLK_GATE_H
 #define __DRV_CLK_GATE_H
 
-#include <linux/regmap.h>
-#include <linux/clk-provider.h>
+#include <linux/types.h>
 
 struct clk;
-
-struct mtk_clk_gate {
-       struct clk_hw   hw;
-       struct regmap   *regmap;
-       int             set_ofs;
-       int             clr_ofs;
-       int             sta_ofs;
-       u8              bit;
-};
-
-static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
-{
-       return container_of(hw, struct mtk_clk_gate, hw);
-}
+struct clk_onecell_data;
+struct clk_ops;
+struct device;
+struct device_node;
 
 extern const struct clk_ops mtk_clk_gate_ops_setclr;
 extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
 extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
 extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
 
-struct clk *mtk_clk_register_gate(
-               const char *name,
-               const char *parent_name,
-               struct regmap *regmap,
-               int set_ofs,
-               int clr_ofs,
-               int sta_ofs,
-               u8 bit,
-               const struct clk_ops *ops,
-               unsigned long flags,
-               struct device *dev);
+struct mtk_gate_regs {
+       u32 sta_ofs;
+       u32 clr_ofs;
+       u32 set_ofs;
+};
+
+struct mtk_gate {
+       int id;
+       const char *name;
+       const char *parent_name;
+       const struct mtk_gate_regs *regs;
+       int shift;
+       const struct clk_ops *ops;
+       unsigned long flags;
+};
 
 #define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift,     \
                        _ops, _flags) {                         \
@@ -57,4 +50,16 @@ struct clk *mtk_clk_register_gate(
 #define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops)             \
        GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)
 
+int mtk_clk_register_gates(struct device_node *node,
+                          const struct mtk_gate *clks, int num,
+                          struct clk_onecell_data *clk_data);
+
+int mtk_clk_register_gates_with_dev(struct device_node *node,
+                                   const struct mtk_gate *clks, int num,
+                                   struct clk_onecell_data *clk_data,
+                                   struct device *dev);
+
+void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
+                             struct clk_onecell_data *clk_data);
+
 #endif /* __DRV_CLK_GATE_H */
index 695be0f..1eb3e45 100644 (file)
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
-#include "clk-mtk.h"
-#include "clk-gate.h"
 #include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt2701-clk.h>
 
index a3bd9a1..ff72b9a 100644 (file)
@@ -13,8 +13,9 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
-#include "clk-mtk.h"
 #include "clk-gate.h"
+#include "clk-pll.h"
+#include "clk-mtk.h"
 
 #include <dt-bindings/clock/mt2712-clk.h>
 
index d77ea5a..24829ca 100644 (file)
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
-#include "clk-mtk.h"
 #include "clk-gate.h"
+#include "clk-mtk.h"
 #include "clk-mux.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt6765-clk.h>
 
index 9825385..7b61664 100644 (file)
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
+#include "clk-gate.h"
 #include "clk-mtk.h"
 #include "clk-mux.h"
-#include "clk-gate.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt6779-clk.h>
 
index 428eb24..02259e8 100644 (file)
@@ -9,8 +9,9 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
-#include "clk-mtk.h"
 #include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt6797-clk.h>
 
index ef5947e..0e1fb30 100644 (file)
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
-#include "clk-mtk.h"
-#include "clk-gate.h"
 #include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt7622-clk.h>
 #include <linux/clk.h> /* for consumer */
index a0ee079..c0e023b 100644 (file)
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
-#include "clk-mtk.h"
-#include "clk-gate.h"
 #include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt7629-clk.h>
 
index 98ec388..21d4c82 100644 (file)
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include "clk-mtk.h"
+
 #include "clk-gate.h"
+#include "clk-mtk.h"
 #include "clk-mux.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt7986-clk.h>
 #include <linux/clk.h>
index 9b4b645..09ad272 100644 (file)
@@ -11,8 +11,9 @@
 #include <linux/mfd/syscon.h>
 #include <dt-bindings/clock/mt8135-clk.h>
 
-#include "clk-mtk.h"
 #include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 static DEFINE_SPINLOCK(mt8135_clk_lock);
 
index e5ea10e..812b33a 100644 (file)
@@ -12,8 +12,9 @@
 #include <linux/slab.h>
 #include <linux/mfd/syscon.h>
 
-#include "clk-mtk.h"
 #include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt8167-clk.h>
 
index 8f898ac..46b7655 100644 (file)
@@ -8,9 +8,10 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
-#include "clk-mtk.h"
-#include "clk-gate.h"
 #include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt8173-clk.h>
 
index 5046852..6849655 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+#include "clk-gate.h"
 #include "clk-mtk.h"
 #include "clk-mux.h"
-#include "clk-gate.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt8183-clk.h>
 
index cbc7c6d..ab27cd6 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+#include "clk-gate.h"
 #include "clk-mtk.h"
 #include "clk-mux.h"
-#include "clk-gate.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt8192-clk.h>
 
@@ -1236,9 +1237,17 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
 
        r = mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data);
        if (r)
-               return r;
+               goto free_clk_data;
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               goto free_clk_data;
 
-       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       return r;
+
+free_clk_data:
+       mtk_free_clk_data(clk_data);
+       return r;
 }
 
 static int clk_mt8192_peri_probe(struct platform_device *pdev)
@@ -1253,9 +1262,17 @@ static int clk_mt8192_peri_probe(struct platform_device *pdev)
 
        r = mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data);
        if (r)
-               return r;
+               goto free_clk_data;
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               goto free_clk_data;
+
+       return r;
 
-       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+free_clk_data:
+       mtk_free_clk_data(clk_data);
+       return r;
 }
 
 static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
@@ -1271,9 +1288,17 @@ static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
        mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
        r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
        if (r)
-               return r;
+               goto free_clk_data;
 
-       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               goto free_clk_data;
+
+       return r;
+
+free_clk_data:
+       mtk_free_clk_data(clk_data);
+       return r;
 }
 
 static const struct of_device_id of_match_clk_mt8192[] = {
index 6156cee..eecc703 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "clk-gate.h"
 #include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt8195-clk.h>
 #include <linux/of_device.h>
@@ -119,24 +120,47 @@ static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
        if (!clk_data)
                return -ENOMEM;
 
-       mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-       r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+       r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
        if (r)
                goto free_apmixed_data;
 
+       r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+       if (r)
+               goto unregister_plls;
+
        r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
        if (r)
-               goto free_apmixed_data;
+               goto unregister_gates;
+
+       platform_set_drvdata(pdev, clk_data);
 
        return r;
 
+unregister_gates:
+       mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+unregister_plls:
+       mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
 free_apmixed_data:
        mtk_free_clk_data(clk_data);
        return r;
 }
 
+static int clk_mt8195_apmixed_remove(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+       of_clk_del_provider(node);
+       mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+       mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+       mtk_free_clk_data(clk_data);
+
+       return 0;
+}
+
 static struct platform_driver clk_mt8195_apmixed_drv = {
        .probe = clk_mt8195_apmixed_probe,
+       .remove = clk_mt8195_apmixed_remove,
        .driver = {
                .name = "clk-mt8195-apmixed",
                .of_match_table = of_match_clk_mt8195_apmixed,
index f1c8418..8cd88df 100644 (file)
@@ -4,6 +4,7 @@
 // Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
 
 #include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt8195-clk.h>
 #include <linux/clk-provider.h>
@@ -65,18 +66,37 @@ static int clk_mt8195_apusys_pll_probe(struct platform_device *pdev)
        if (!clk_data)
                return -ENOMEM;
 
-       mtk_clk_register_plls(node, apusys_plls, ARRAY_SIZE(apusys_plls), clk_data);
-       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       r = mtk_clk_register_plls(node, apusys_plls, ARRAY_SIZE(apusys_plls), clk_data);
        if (r)
                goto free_apusys_pll_data;
 
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               goto unregister_plls;
+
+       platform_set_drvdata(pdev, clk_data);
+
        return r;
 
+unregister_plls:
+       mtk_clk_unregister_plls(apusys_plls, ARRAY_SIZE(apusys_plls), clk_data);
 free_apusys_pll_data:
        mtk_free_clk_data(clk_data);
        return r;
 }
 
+static int clk_mt8195_apusys_pll_remove(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+       struct device_node *node = pdev->dev.of_node;
+
+       of_clk_del_provider(node);
+       mtk_clk_unregister_plls(apusys_plls, ARRAY_SIZE(apusys_plls), clk_data);
+       mtk_free_clk_data(clk_data);
+
+       return 0;
+}
+
 static const struct of_device_id of_match_clk_mt8195_apusys_pll[] = {
        { .compatible = "mediatek,mt8195-apusys_pll", },
        {}
@@ -84,6 +104,7 @@ static const struct of_device_id of_match_clk_mt8195_apusys_pll[] = {
 
 static struct platform_driver clk_mt8195_apusys_pll_drv = {
        .probe = clk_mt8195_apusys_pll_probe,
+       .remove = clk_mt8195_apusys_pll_remove,
        .driver = {
                .name = "clk-mt8195-apusys_pll",
                .of_match_table = of_match_clk_mt8195_apusys_pll,
index 3d261fc..e4d00fe 100644 (file)
@@ -134,6 +134,7 @@ static const struct of_device_id of_match_clk_mt8195_cam[] = {
 
 static struct platform_driver clk_mt8195_cam_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-cam",
                .of_match_table = of_match_clk_mt8195_cam,
index f846f1d..4e326b6 100644 (file)
@@ -42,6 +42,7 @@ static const struct of_device_id of_match_clk_mt8195_ccu[] = {
 
 static struct platform_driver clk_mt8195_ccu_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-ccu",
                .of_match_table = of_match_clk_mt8195_ccu,
index 22b52a8..12f5c43 100644 (file)
@@ -88,6 +88,7 @@ static const struct of_device_id of_match_clk_mt8195_img[] = {
 
 static struct platform_driver clk_mt8195_img_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-img",
                .of_match_table = of_match_clk_mt8195_img,
index 4ab312e..fbc809d 100644 (file)
@@ -58,6 +58,7 @@ static const struct of_device_id of_match_clk_mt8195_imp_iic_wrap[] = {
 
 static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-imp_iic_wrap",
                .of_match_table = of_match_clk_mt8195_imp_iic_wrap,
index 5f9b699..8ebe3b9 100644 (file)
@@ -198,6 +198,7 @@ static const struct of_device_id of_match_clk_mt8195_infra_ao[] = {
 
 static struct platform_driver clk_mt8195_infra_ao_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-infra_ao",
                .of_match_table = of_match_clk_mt8195_infra_ao,
index fc1d42b..b0d745c 100644 (file)
@@ -43,6 +43,7 @@ static const struct of_device_id of_match_clk_mt8195_ipe[] = {
 
 static struct platform_driver clk_mt8195_ipe_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-ipe",
                .of_match_table = of_match_clk_mt8195_ipe,
index aca6d9c..9411c55 100644 (file)
@@ -39,6 +39,7 @@ static const struct of_device_id of_match_clk_mt8195_mfg[] = {
 
 static struct platform_driver clk_mt8195_mfg_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-mfg",
                .of_match_table = of_match_clk_mt8195_mfg,
index 907a92b..2f6b3bb 100644 (file)
@@ -54,6 +54,7 @@ static const struct of_device_id of_match_clk_mt8195_peri_ao[] = {
 
 static struct platform_driver clk_mt8195_peri_ao_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-peri_ao",
                .of_match_table = of_match_clk_mt8195_peri_ao,
index 26b4846..e16c383 100644 (file)
@@ -39,6 +39,7 @@ static const struct of_device_id of_match_clk_mt8195_scp_adsp[] = {
 
 static struct platform_driver clk_mt8195_scp_adsp_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-scp_adsp",
                .of_match_table = of_match_clk_mt8195_scp_adsp,
index 3e2aba9..b602fcd 100644 (file)
@@ -1239,32 +1239,79 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
                goto free_top_data;
        }
 
-       mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
-                                   top_clk_data);
-       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
-       mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
-                              &mt8195_clk_lock, top_clk_data);
-       mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
-                                   &mt8195_clk_lock, top_clk_data);
-       mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
-                                   &mt8195_clk_lock, top_clk_data);
-       r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+       r = mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+                                       top_clk_data);
        if (r)
                goto free_top_data;
 
+       r = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+       if (r)
+               goto unregister_fixed_clks;
+
+       r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
+                                  &mt8195_clk_lock, top_clk_data);
+       if (r)
+               goto unregister_factors;
+
+       r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+                                       &mt8195_clk_lock, top_clk_data);
+       if (r)
+               goto unregister_muxes;
+
+       r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+                                       &mt8195_clk_lock, top_clk_data);
+       if (r)
+               goto unregister_composite_muxes;
+
+       r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+       if (r)
+               goto unregister_composite_divs;
+
        r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
        if (r)
-               goto free_top_data;
+               goto unregister_gates;
+
+       platform_set_drvdata(pdev, top_clk_data);
 
        return r;
 
+unregister_gates:
+       mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+unregister_composite_divs:
+       mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
+unregister_composite_muxes:
+       mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
+unregister_muxes:
+       mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
+unregister_factors:
+       mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+unregister_fixed_clks:
+       mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
 free_top_data:
        mtk_free_clk_data(top_clk_data);
        return r;
 }
 
+static int clk_mt8195_topck_remove(struct platform_device *pdev)
+{
+       struct clk_onecell_data *top_clk_data = platform_get_drvdata(pdev);
+       struct device_node *node = pdev->dev.of_node;
+
+       of_clk_del_provider(node);
+       mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+       mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
+       mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
+       mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
+       mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+       mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
+       mtk_free_clk_data(top_clk_data);
+
+       return 0;
+}
+
 static struct platform_driver clk_mt8195_topck_drv = {
        .probe = clk_mt8195_topck_probe,
+       .remove = clk_mt8195_topck_remove,
        .driver = {
                .name = "clk-mt8195-topck",
                .of_match_table = of_match_clk_mt8195_topck,
index a1df04f..a1446b6 100644 (file)
@@ -96,6 +96,7 @@ static const struct of_device_id of_match_clk_mt8195_vdec[] = {
 
 static struct platform_driver clk_mt8195_vdec_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-vdec",
                .of_match_table = of_match_clk_mt8195_vdec,
index f7ff761..3bc7ed1 100644 (file)
@@ -105,17 +105,35 @@ static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
 
        r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
        if (r)
-               goto free_vdo0_data;
+               goto unregister_gates;
+
+       platform_set_drvdata(pdev, clk_data);
 
        return r;
 
+unregister_gates:
+       mtk_clk_unregister_gates(vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
 free_vdo0_data:
        mtk_free_clk_data(clk_data);
        return r;
 }
 
+static int clk_mt8195_vdo0_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->parent->of_node;
+       struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+       of_clk_del_provider(node);
+       mtk_clk_unregister_gates(vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
+       mtk_free_clk_data(clk_data);
+
+       return 0;
+}
+
 static struct platform_driver clk_mt8195_vdo0_drv = {
        .probe = clk_mt8195_vdo0_probe,
+       .remove = clk_mt8195_vdo0_remove,
        .driver = {
                .name = "clk-mt8195-vdo0",
        },
index 03df8ea..90c738a 100644 (file)
@@ -122,17 +122,35 @@ static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
 
        r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
        if (r)
-               goto free_vdo1_data;
+               goto unregister_gates;
+
+       platform_set_drvdata(pdev, clk_data);
 
        return r;
 
+unregister_gates:
+       mtk_clk_unregister_gates(vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
 free_vdo1_data:
        mtk_free_clk_data(clk_data);
        return r;
 }
 
+static int clk_mt8195_vdo1_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->parent->of_node;
+       struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+       of_clk_del_provider(node);
+       mtk_clk_unregister_gates(vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
+       mtk_free_clk_data(clk_data);
+
+       return 0;
+}
+
 static struct platform_driver clk_mt8195_vdo1_drv = {
        .probe = clk_mt8195_vdo1_probe,
+       .remove = clk_mt8195_vdo1_remove,
        .driver = {
                .name = "clk-mt8195-vdo1",
        },
index 7339851..622f578 100644 (file)
@@ -61,6 +61,7 @@ static const struct of_device_id of_match_clk_mt8195_venc[] = {
 
 static struct platform_driver clk_mt8195_venc_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-venc",
                .of_match_table = of_match_clk_mt8195_venc,
index c324146..bf2939c 100644 (file)
@@ -102,6 +102,7 @@ static const struct of_device_id of_match_clk_mt8195_vpp0[] = {
 
 static struct platform_driver clk_mt8195_vpp0_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-vpp0",
                .of_match_table = of_match_clk_mt8195_vpp0,
index ce0b9a4..ffd52c7 100644 (file)
@@ -100,6 +100,7 @@ static const struct of_device_id of_match_clk_mt8195_vpp1[] = {
 
 static struct platform_driver clk_mt8195_vpp1_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-vpp1",
                .of_match_table = of_match_clk_mt8195_vpp1,
index 274d608..b483fab 100644 (file)
@@ -135,6 +135,7 @@ static const struct of_device_id of_match_clk_mt8195_wpe[] = {
 
 static struct platform_driver clk_mt8195_wpe_drv = {
        .probe = mtk_clk_simple_probe,
+       .remove = mtk_clk_simple_remove,
        .driver = {
                .name = "clk-mt8195-wpe",
                .of_match_table = of_match_clk_mt8195_wpe,
index 9d4261e..a37143f 100644 (file)
@@ -11,8 +11,9 @@
 #include <linux/slab.h>
 #include <linux/mfd/syscon.h>
 
-#include "clk-mtk.h"
 #include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-pll.h"
 
 #include <dt-bindings/clock/mt8516-clk.h>
 
index 8d5791b..b406326 100644 (file)
@@ -4,17 +4,16 @@
  * Author: James Liao <jamesjj.liao@mediatek.com>
  */
 
-#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/clkdev.h>
-#include <linux/module.h>
 #include <linux/mfd/syscon.h>
-#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
 
 #include "clk-mtk.h"
 #include "clk-gate.h"
@@ -54,112 +53,135 @@ void mtk_free_clk_data(struct clk_onecell_data *clk_data)
        kfree(clk_data);
 }
 
-void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
-               int num, struct clk_onecell_data *clk_data)
+int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num,
+                               struct clk_onecell_data *clk_data)
 {
        int i;
        struct clk *clk;
 
+       if (!clk_data)
+               return -ENOMEM;
+
        for (i = 0; i < num; i++) {
                const struct mtk_fixed_clk *rc = &clks[i];
 
-               if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[rc->id]))
+               if (!IS_ERR_OR_NULL(clk_data->clks[rc->id])) {
+                       pr_warn("Trying to register duplicate clock ID: %d\n", rc->id);
                        continue;
+               }
 
                clk = clk_register_fixed_rate(NULL, rc->name, rc->parent, 0,
                                              rc->rate);
 
                if (IS_ERR(clk)) {
-                       pr_err("Failed to register clk %s: %ld\n",
-                                       rc->name, PTR_ERR(clk));
-                       continue;
+                       pr_err("Failed to register clk %s: %pe\n", rc->name, clk);
+                       goto err;
                }
 
-               if (clk_data)
-                       clk_data->clks[rc->id] = clk;
+               clk_data->clks[rc->id] = clk;
+       }
+
+       return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_fixed_clk *rc = &clks[i];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[rc->id]))
+                       continue;
+
+               clk_unregister_fixed_rate(clk_data->clks[rc->id]);
+               clk_data->clks[rc->id] = ERR_PTR(-ENOENT);
        }
+
+       return PTR_ERR(clk);
 }
 EXPORT_SYMBOL_GPL(mtk_clk_register_fixed_clks);
 
-void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
-               int num, struct clk_onecell_data *clk_data)
+void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num,
+                                  struct clk_onecell_data *clk_data)
 {
        int i;
-       struct clk *clk;
 
-       for (i = 0; i < num; i++) {
-               const struct mtk_fixed_factor *ff = &clks[i];
-
-               if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[ff->id]))
-                       continue;
+       if (!clk_data)
+               return;
 
-               clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name,
-                               CLK_SET_RATE_PARENT, ff->mult, ff->div);
+       for (i = num; i > 0; i--) {
+               const struct mtk_fixed_clk *rc = &clks[i - 1];
 
-               if (IS_ERR(clk)) {
-                       pr_err("Failed to register clk %s: %ld\n",
-                                       ff->name, PTR_ERR(clk));
+               if (IS_ERR_OR_NULL(clk_data->clks[rc->id]))
                        continue;
-               }
 
-               if (clk_data)
-                       clk_data->clks[ff->id] = clk;
+               clk_unregister_fixed_rate(clk_data->clks[rc->id]);
+               clk_data->clks[rc->id] = ERR_PTR(-ENOENT);
        }
 }
-EXPORT_SYMBOL_GPL(mtk_clk_register_factors);
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_fixed_clks);
 
-int mtk_clk_register_gates_with_dev(struct device_node *node,
-               const struct mtk_gate *clks,
-               int num, struct clk_onecell_data *clk_data,
-               struct device *dev)
+int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
+                            struct clk_onecell_data *clk_data)
 {
        int i;
        struct clk *clk;
-       struct regmap *regmap;
 
        if (!clk_data)
                return -ENOMEM;
 
-       regmap = device_node_to_regmap(node);
-       if (IS_ERR(regmap)) {
-               pr_err("Cannot find regmap for %pOF: %ld\n", node,
-                               PTR_ERR(regmap));
-               return PTR_ERR(regmap);
-       }
-
        for (i = 0; i < num; i++) {
-               const struct mtk_gate *gate = &clks[i];
+               const struct mtk_fixed_factor *ff = &clks[i];
 
-               if (!IS_ERR_OR_NULL(clk_data->clks[gate->id]))
+               if (!IS_ERR_OR_NULL(clk_data->clks[ff->id])) {
+                       pr_warn("Trying to register duplicate clock ID: %d\n", ff->id);
                        continue;
+               }
 
-               clk = mtk_clk_register_gate(gate->name, gate->parent_name,
-                               regmap,
-                               gate->regs->set_ofs,
-                               gate->regs->clr_ofs,
-                               gate->regs->sta_ofs,
-                               gate->shift, gate->ops, gate->flags, dev);
+               clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name,
+                               CLK_SET_RATE_PARENT, ff->mult, ff->div);
 
                if (IS_ERR(clk)) {
-                       pr_err("Failed to register clk %s: %ld\n",
-                                       gate->name, PTR_ERR(clk));
-                       continue;
+                       pr_err("Failed to register clk %s: %pe\n", ff->name, clk);
+                       goto err;
                }
 
-               clk_data->clks[gate->id] = clk;
+               clk_data->clks[ff->id] = clk;
        }
 
        return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_fixed_factor *ff = &clks[i];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[ff->id]))
+                       continue;
+
+               clk_unregister_fixed_factor(clk_data->clks[ff->id]);
+               clk_data->clks[ff->id] = ERR_PTR(-ENOENT);
+       }
+
+       return PTR_ERR(clk);
 }
+EXPORT_SYMBOL_GPL(mtk_clk_register_factors);
 
-int mtk_clk_register_gates(struct device_node *node,
-               const struct mtk_gate *clks,
-               int num, struct clk_onecell_data *clk_data)
+void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
+                               struct clk_onecell_data *clk_data)
 {
-       return mtk_clk_register_gates_with_dev(node,
-               clks, num, clk_data, NULL);
+       int i;
+
+       if (!clk_data)
+               return;
+
+       for (i = num; i > 0; i--) {
+               const struct mtk_fixed_factor *ff = &clks[i - 1];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[ff->id]))
+                       continue;
+
+               clk_unregister_fixed_factor(clk_data->clks[ff->id]);
+               clk_data->clks[ff->id] = ERR_PTR(-ENOENT);
+       }
 }
-EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_factors);
 
 struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
                void __iomem *base, spinlock_t *lock)
@@ -248,58 +270,161 @@ err_out:
        return ERR_PTR(ret);
 }
 
-void mtk_clk_register_composites(const struct mtk_composite *mcs,
-               int num, void __iomem *base, spinlock_t *lock,
-               struct clk_onecell_data *clk_data)
+static void mtk_clk_unregister_composite(struct clk *clk)
+{
+       struct clk_hw *hw;
+       struct clk_composite *composite;
+       struct clk_mux *mux = NULL;
+       struct clk_gate *gate = NULL;
+       struct clk_divider *div = NULL;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       composite = to_clk_composite(hw);
+       if (composite->mux_hw)
+               mux = to_clk_mux(composite->mux_hw);
+       if (composite->gate_hw)
+               gate = to_clk_gate(composite->gate_hw);
+       if (composite->rate_hw)
+               div = to_clk_divider(composite->rate_hw);
+
+       clk_unregister_composite(clk);
+       kfree(div);
+       kfree(gate);
+       kfree(mux);
+}
+
+int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
+                               void __iomem *base, spinlock_t *lock,
+                               struct clk_onecell_data *clk_data)
 {
        struct clk *clk;
        int i;
 
+       if (!clk_data)
+               return -ENOMEM;
+
        for (i = 0; i < num; i++) {
                const struct mtk_composite *mc = &mcs[i];
 
-               if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[mc->id]))
+               if (!IS_ERR_OR_NULL(clk_data->clks[mc->id])) {
+                       pr_warn("Trying to register duplicate clock ID: %d\n",
+                               mc->id);
                        continue;
+               }
 
                clk = mtk_clk_register_composite(mc, base, lock);
 
                if (IS_ERR(clk)) {
-                       pr_err("Failed to register clk %s: %ld\n",
-                                       mc->name, PTR_ERR(clk));
-                       continue;
+                       pr_err("Failed to register clk %s: %pe\n", mc->name, clk);
+                       goto err;
                }
 
-               if (clk_data)
-                       clk_data->clks[mc->id] = clk;
+               clk_data->clks[mc->id] = clk;
+       }
+
+       return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_composite *mc = &mcs[i];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mcs->id]))
+                       continue;
+
+               mtk_clk_unregister_composite(clk_data->clks[mc->id]);
+               clk_data->clks[mc->id] = ERR_PTR(-ENOENT);
        }
+
+       return PTR_ERR(clk);
 }
 EXPORT_SYMBOL_GPL(mtk_clk_register_composites);
 
-void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
-                       int num, void __iomem *base, spinlock_t *lock,
-                               struct clk_onecell_data *clk_data)
+void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
+                                  struct clk_onecell_data *clk_data)
+{
+       int i;
+
+       if (!clk_data)
+               return;
+
+       for (i = num; i > 0; i--) {
+               const struct mtk_composite *mc = &mcs[i - 1];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mc->id]))
+                       continue;
+
+               mtk_clk_unregister_composite(clk_data->clks[mc->id]);
+               clk_data->clks[mc->id] = ERR_PTR(-ENOENT);
+       }
+}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_composites);
+
+int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
+                             void __iomem *base, spinlock_t *lock,
+                             struct clk_onecell_data *clk_data)
 {
        struct clk *clk;
        int i;
 
+       if (!clk_data)
+               return -ENOMEM;
+
        for (i = 0; i <  num; i++) {
                const struct mtk_clk_divider *mcd = &mcds[i];
 
-               if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[mcd->id]))
+               if (!IS_ERR_OR_NULL(clk_data->clks[mcd->id])) {
+                       pr_warn("Trying to register duplicate clock ID: %d\n",
+                               mcd->id);
                        continue;
+               }
 
                clk = clk_register_divider(NULL, mcd->name, mcd->parent_name,
                        mcd->flags, base +  mcd->div_reg, mcd->div_shift,
                        mcd->div_width, mcd->clk_divider_flags, lock);
 
                if (IS_ERR(clk)) {
-                       pr_err("Failed to register clk %s: %ld\n",
-                               mcd->name, PTR_ERR(clk));
-                       continue;
+                       pr_err("Failed to register clk %s: %pe\n", mcd->name, clk);
+                       goto err;
                }
 
-               if (clk_data)
-                       clk_data->clks[mcd->id] = clk;
+               clk_data->clks[mcd->id] = clk;
+       }
+
+       return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_clk_divider *mcd = &mcds[i];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mcd->id]))
+                       continue;
+
+               mtk_clk_unregister_composite(clk_data->clks[mcd->id]);
+               clk_data->clks[mcd->id] = ERR_PTR(-ENOENT);
+       }
+
+       return PTR_ERR(clk);
+}
+
+void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
+                                struct clk_onecell_data *clk_data)
+{
+       int i;
+
+       if (!clk_data)
+               return;
+
+       for (i = num; i > 0; i--) {
+               const struct mtk_clk_divider *mcd = &mcds[i - 1];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mcd->id]))
+                       continue;
+
+               clk_unregister_divider(clk_data->clks[mcd->id]);
+               clk_data->clks[mcd->id] = ERR_PTR(-ENOENT);
        }
 }
 
@@ -324,13 +449,30 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
 
        r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
        if (r)
-               goto free_data;
+               goto unregister_clks;
+
+       platform_set_drvdata(pdev, clk_data);
 
        return r;
 
+unregister_clks:
+       mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
 free_data:
        mtk_free_clk_data(clk_data);
        return r;
 }
 
+int mtk_clk_simple_remove(struct platform_device *pdev)
+{
+       const struct mtk_clk_desc *mcd = of_device_get_match_data(&pdev->dev);
+       struct clk_onecell_data *clk_data = platform_get_drvdata(pdev);
+       struct device_node *node = pdev->dev.of_node;
+
+       of_clk_del_provider(node);
+       mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+       mtk_free_clk_data(clk_data);
+
+       return 0;
+}
+
 MODULE_LICENSE("GPL");
index 0ff289d..bf6565a 100644 (file)
@@ -7,19 +7,19 @@
 #ifndef __DRV_CLK_MTK_H
 #define __DRV_CLK_MTK_H
 
-#include <linux/regmap.h>
-#include <linux/bitops.h>
 #include <linux/clk-provider.h>
-#include <linux/platform_device.h>
-
-struct clk;
-struct clk_onecell_data;
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
 
 #define MAX_MUX_GATE_BIT       31
 #define INVALID_MUX_GATE_BIT   (MAX_MUX_GATE_BIT + 1)
 
 #define MHZ (1000 * 1000)
 
+struct platform_device;
+
 struct mtk_fixed_clk {
        int id;
        const char *name;
@@ -34,8 +34,10 @@ struct mtk_fixed_clk {
                .rate = _rate,                          \
        }
 
-void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
-               int num, struct clk_onecell_data *clk_data);
+int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num,
+                               struct clk_onecell_data *clk_data);
+void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num,
+                                  struct clk_onecell_data *clk_data);
 
 struct mtk_fixed_factor {
        int id;
@@ -53,8 +55,10 @@ struct mtk_fixed_factor {
                .div = _div,                            \
        }
 
-void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
-               int num, struct clk_onecell_data *clk_data);
+int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
+                            struct clk_onecell_data *clk_data);
+void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
+                               struct clk_onecell_data *clk_data);
 
 struct mtk_composite {
        int id;
@@ -146,34 +150,11 @@ struct mtk_composite {
 struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
                void __iomem *base, spinlock_t *lock);
 
-void mtk_clk_register_composites(const struct mtk_composite *mcs,
-               int num, void __iomem *base, spinlock_t *lock,
-               struct clk_onecell_data *clk_data);
-
-struct mtk_gate_regs {
-       u32 sta_ofs;
-       u32 clr_ofs;
-       u32 set_ofs;
-};
-
-struct mtk_gate {
-       int id;
-       const char *name;
-       const char *parent_name;
-       const struct mtk_gate_regs *regs;
-       int shift;
-       const struct clk_ops *ops;
-       unsigned long flags;
-};
-
-int mtk_clk_register_gates(struct device_node *node,
-                       const struct mtk_gate *clks, int num,
-                       struct clk_onecell_data *clk_data);
-
-int mtk_clk_register_gates_with_dev(struct device_node *node,
-               const struct mtk_gate *clks,
-               int num, struct clk_onecell_data *clk_data,
-               struct device *dev);
+int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
+                               void __iomem *base, spinlock_t *lock,
+                               struct clk_onecell_data *clk_data);
+void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
+                                  struct clk_onecell_data *clk_data);
 
 struct mtk_clk_divider {
        int id;
@@ -197,52 +178,15 @@ struct mtk_clk_divider {
                .div_width = _width,                            \
 }
 
-void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
-                       int num, void __iomem *base, spinlock_t *lock,
-                               struct clk_onecell_data *clk_data);
+int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num,
+                             void __iomem *base, spinlock_t *lock,
+                             struct clk_onecell_data *clk_data);
+void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
+                                struct clk_onecell_data *clk_data);
 
 struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
 void mtk_free_clk_data(struct clk_onecell_data *clk_data);
 
-#define HAVE_RST_BAR   BIT(0)
-#define PLL_AO         BIT(1)
-
-struct mtk_pll_div_table {
-       u32 div;
-       unsigned long freq;
-};
-
-struct mtk_pll_data {
-       int id;
-       const char *name;
-       u32 reg;
-       u32 pwr_reg;
-       u32 en_mask;
-       u32 pd_reg;
-       u32 tuner_reg;
-       u32 tuner_en_reg;
-       u8 tuner_en_bit;
-       int pd_shift;
-       unsigned int flags;
-       const struct clk_ops *ops;
-       u32 rst_bar_mask;
-       unsigned long fmin;
-       unsigned long fmax;
-       int pcwbits;
-       int pcwibits;
-       u32 pcw_reg;
-       int pcw_shift;
-       u32 pcw_chg_reg;
-       const struct mtk_pll_div_table *div_table;
-       const char *parent_name;
-       u32 en_reg;
-       u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
-};
-
-void mtk_clk_register_plls(struct device_node *node,
-               const struct mtk_pll_data *plls, int num_plls,
-               struct clk_onecell_data *clk_data);
-
 struct clk *mtk_clk_register_ref2usb_tx(const char *name,
                        const char *parent_name, void __iomem *reg);
 
@@ -258,5 +202,6 @@ struct mtk_clk_desc {
 };
 
 int mtk_clk_simple_probe(struct platform_device *pdev);
+int mtk_clk_simple_remove(struct platform_device *pdev);
 
 #endif /* __DRV_CLK_MTK_H */
index 6d3a50e..21ad5a4 100644 (file)
@@ -4,15 +4,26 @@
  * Author: Owen Chen <owen.chen@mediatek.com>
  */
 
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
 
-#include "clk-mtk.h"
 #include "clk-mux.h"
 
+struct mtk_clk_mux {
+       struct clk_hw hw;
+       struct regmap *regmap;
+       const struct mtk_mux *data;
+       spinlock_t *lock;
+       bool reparent;
+};
+
 static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
 {
        return container_of(hw, struct mtk_clk_mux, hw);
@@ -164,6 +175,21 @@ static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
        return clk;
 }
 
+static void mtk_clk_unregister_mux(struct clk *clk)
+{
+       struct mtk_clk_mux *mux;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       mux = to_mtk_clk_mux(hw);
+
+       clk_unregister(clk);
+       kfree(mux);
+}
+
 int mtk_clk_register_muxes(const struct mtk_mux *muxes,
                           int num, struct device_node *node,
                           spinlock_t *lock,
@@ -175,29 +201,64 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
 
        regmap = device_node_to_regmap(node);
        if (IS_ERR(regmap)) {
-               pr_err("Cannot find regmap for %pOF: %ld\n", node,
-                      PTR_ERR(regmap));
+               pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
                return PTR_ERR(regmap);
        }
 
        for (i = 0; i < num; i++) {
                const struct mtk_mux *mux = &muxes[i];
 
-               if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
-                       clk = mtk_clk_register_mux(mux, regmap, lock);
+               if (!IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
+                       pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
+                               node, mux->id);
+                       continue;
+               }
 
-                       if (IS_ERR(clk)) {
-                               pr_err("Failed to register clk %s: %ld\n",
-                                      mux->name, PTR_ERR(clk));
-                               continue;
-                       }
+               clk = mtk_clk_register_mux(mux, regmap, lock);
 
-                       clk_data->clks[mux->id] = clk;
+               if (IS_ERR(clk)) {
+                       pr_err("Failed to register clk %s: %pe\n", mux->name, clk);
+                       goto err;
                }
+
+               clk_data->clks[mux->id] = clk;
        }
 
        return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_mux *mux = &muxes[i];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+                       continue;
+
+               mtk_clk_unregister_mux(clk_data->clks[mux->id]);
+               clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+       }
+
+       return PTR_ERR(clk);
 }
 EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);
 
+void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
+                             struct clk_onecell_data *clk_data)
+{
+       int i;
+
+       if (!clk_data)
+               return;
+
+       for (i = num; i > 0; i--) {
+               const struct mtk_mux *mux = &muxes[i - 1];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[mux->id]))
+                       continue;
+
+               mtk_clk_unregister_mux(clk_data->clks[mux->id]);
+               clk_data->clks[mux->id] = ERR_PTR(-ENOENT);
+       }
+}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes);
+
 MODULE_LICENSE("GPL");
index 27841d6..903a3c9 100644 (file)
@@ -7,15 +7,13 @@
 #ifndef __DRV_CLK_MTK_MUX_H
 #define __DRV_CLK_MTK_MUX_H
 
-#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
 
-struct mtk_clk_mux {
-       struct clk_hw hw;
-       struct regmap *regmap;
-       const struct mtk_mux *data;
-       spinlock_t *lock;
-       bool reparent;
-};
+struct clk;
+struct clk_onecell_data;
+struct clk_ops;
+struct device_node;
 
 struct mtk_mux {
        int id;
@@ -88,4 +86,7 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
                           spinlock_t *lock,
                           struct clk_onecell_data *clk_data);
 
+void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
+                             struct clk_onecell_data *clk_data);
+
 #endif /* __DRV_CLK_MTK_MUX_H */
index 60d7ffa..ccaa208 100644 (file)
@@ -4,15 +4,18 @@
  * Author: James Liao <jamesjj.liao@mediatek.com>
  */
 
-#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
-#include <linux/clkdev.h>
-#include <linux/delay.h>
 
-#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MHZ                    (1000 * 1000)
 
 #define REG_CON0               0
 #define REG_CON1               4
@@ -359,8 +362,24 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
        return clk;
 }
 
-void mtk_clk_register_plls(struct device_node *node,
-               const struct mtk_pll_data *plls, int num_plls, struct clk_onecell_data *clk_data)
+static void mtk_clk_unregister_pll(struct clk *clk)
+{
+       struct clk_hw *hw;
+       struct mtk_clk_pll *pll;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       pll = to_mtk_clk_pll(hw);
+
+       clk_unregister(clk);
+       kfree(pll);
+}
+
+int mtk_clk_register_plls(struct device_node *node,
+                         const struct mtk_pll_data *plls, int num_plls,
+                         struct clk_onecell_data *clk_data)
 {
        void __iomem *base;
        int i;
@@ -369,23 +388,82 @@ void mtk_clk_register_plls(struct device_node *node,
        base = of_iomap(node, 0);
        if (!base) {
                pr_err("%s(): ioremap failed\n", __func__);
-               return;
+               return -EINVAL;
        }
 
        for (i = 0; i < num_plls; i++) {
                const struct mtk_pll_data *pll = &plls[i];
 
+               if (!IS_ERR_OR_NULL(clk_data->clks[pll->id])) {
+                       pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
+                               node, pll->id);
+                       continue;
+               }
+
                clk = mtk_clk_register_pll(pll, base);
 
                if (IS_ERR(clk)) {
-                       pr_err("Failed to register clk %s: %ld\n",
-                                       pll->name, PTR_ERR(clk));
-                       continue;
+                       pr_err("Failed to register clk %s: %pe\n", pll->name, clk);
+                       goto err;
                }
 
                clk_data->clks[pll->id] = clk;
        }
+
+       return 0;
+
+err:
+       while (--i >= 0) {
+               const struct mtk_pll_data *pll = &plls[i];
+
+               mtk_clk_unregister_pll(clk_data->clks[pll->id]);
+               clk_data->clks[pll->id] = ERR_PTR(-ENOENT);
+       }
+
+       iounmap(base);
+
+       return PTR_ERR(clk);
 }
 EXPORT_SYMBOL_GPL(mtk_clk_register_plls);
 
+static __iomem void *mtk_clk_pll_get_base(struct clk *clk,
+                                         const struct mtk_pll_data *data)
+{
+       struct clk_hw *hw = __clk_get_hw(clk);
+       struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+       return pll->base_addr - data->reg;
+}
+
+void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
+                            struct clk_onecell_data *clk_data)
+{
+       __iomem void *base = NULL;
+       int i;
+
+       if (!clk_data)
+               return;
+
+       for (i = num_plls; i > 0; i--) {
+               const struct mtk_pll_data *pll = &plls[i - 1];
+
+               if (IS_ERR_OR_NULL(clk_data->clks[pll->id]))
+                       continue;
+
+               /*
+                * This is quite ugly but unfortunately the clks don't have
+                * any device tied to them, so there's no place to store the
+                * pointer to the I/O region base address. We have to fetch
+                * it from one of the registered clks.
+                */
+               base = mtk_clk_pll_get_base(clk_data->clks[pll->id], pll);
+
+               mtk_clk_unregister_pll(clk_data->clks[pll->id]);
+               clk_data->clks[pll->id] = ERR_PTR(-ENOENT);
+       }
+
+       iounmap(base);
+}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_plls);
+
 MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
new file mode 100644 (file)
index 0000000..bf06e44
--- /dev/null
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#ifndef __DRV_CLK_MTK_PLL_H
+#define __DRV_CLK_MTK_PLL_H
+
+#include <linux/types.h>
+
+struct clk_ops;
+struct clk_onecell_data;
+struct device_node;
+
+struct mtk_pll_div_table {
+       u32 div;
+       unsigned long freq;
+};
+
+#define HAVE_RST_BAR   BIT(0)
+#define PLL_AO         BIT(1)
+
+struct mtk_pll_data {
+       int id;
+       const char *name;
+       u32 reg;
+       u32 pwr_reg;
+       u32 en_mask;
+       u32 pd_reg;
+       u32 tuner_reg;
+       u32 tuner_en_reg;
+       u8 tuner_en_bit;
+       int pd_shift;
+       unsigned int flags;
+       const struct clk_ops *ops;
+       u32 rst_bar_mask;
+       unsigned long fmin;
+       unsigned long fmax;
+       int pcwbits;
+       int pcwibits;
+       u32 pcw_reg;
+       int pcw_shift;
+       u32 pcw_chg_reg;
+       const struct mtk_pll_div_table *div_table;
+       const char *parent_name;
+       u32 en_reg;
+       u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
+};
+
+int mtk_clk_register_plls(struct device_node *node,
+                         const struct mtk_pll_data *plls, int num_plls,
+                         struct clk_onecell_data *clk_data);
+void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
+                            struct clk_onecell_data *clk_data);
+
+#endif /* __DRV_CLK_MTK_PLL_H */
index ffe464c..bcec4b8 100644 (file)
@@ -100,8 +100,7 @@ static void mtk_register_reset_controller_common(struct device_node *np,
 
        regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap)) {
-               pr_err("Cannot find regmap for %pOF: %ld\n", np,
-                               PTR_ERR(regmap));
+               pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap);
                return;
        }
 
index cd0f5ba..8f3b7a9 100644 (file)
@@ -2232,7 +2232,7 @@ static struct clk_regmap meson8b_vpu_1 = {
 };
 
 /*
- * The VPU clock has two two identical clock trees (vpu_0 and vpu_1)
+ * The VPU clock has two identical clock trees (vpu_0 and vpu_1)
  * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
  * actually manage this glitch-free mux because it does top-to-bottom
  * updates the each clock tree and switches to the "inactive" one when
diff --git a/drivers/clk/microchip/Kconfig b/drivers/clk/microchip/Kconfig
new file mode 100644 (file)
index 0000000..a5a9987
--- /dev/null
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config COMMON_CLK_PIC32
+       def_bool COMMON_CLK && MACH_PIC32
+
+config MCHP_CLK_MPFS
+       bool "Clk driver for PolarFire SoC"
+       depends on (RISCV && SOC_MICROCHIP_POLARFIRE) || COMPILE_TEST
+       help
+         Supports Clock Configuration for PolarFire SoC
index f34b247..5fa6dcf 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_COMMON_CLK_PIC32) += clk-core.o
 obj-$(CONFIG_PIC32MZDA) += clk-pic32mzda.o
+obj-$(CONFIG_MCHP_CLK_MPFS) += clk-mpfs.o
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
new file mode 100644 (file)
index 0000000..aa1561b
--- /dev/null
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Daire McNamara,<daire.mcnamara@microchip.com>
+ * Copyright (C) 2020 Microchip Technology Inc.  All rights reserved.
+ */
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <dt-bindings/clock/microchip,mpfs-clock.h>
+
+/* address offset of control registers */
+#define REG_CLOCK_CONFIG_CR    0x08u
+#define REG_SUBBLK_CLOCK_CR    0x84u
+#define REG_SUBBLK_RESET_CR    0x88u
+
+struct mpfs_clock_data {
+       void __iomem *base;
+       struct clk_hw_onecell_data hw_data;
+};
+
+struct mpfs_cfg_clock {
+       const struct clk_div_table *table;
+       unsigned int id;
+       u8 shift;
+       u8 width;
+};
+
+struct mpfs_cfg_hw_clock {
+       struct mpfs_cfg_clock cfg;
+       void __iomem *sys_base;
+       struct clk_hw hw;
+       struct clk_init_data init;
+};
+
+#define to_mpfs_cfg_clk(_hw) container_of(_hw, struct mpfs_cfg_hw_clock, hw)
+
+struct mpfs_periph_clock {
+       unsigned int id;
+       u8 shift;
+};
+
+struct mpfs_periph_hw_clock {
+       struct mpfs_periph_clock periph;
+       void __iomem *sys_base;
+       struct clk_hw hw;
+};
+
+#define to_mpfs_periph_clk(_hw) container_of(_hw, struct mpfs_periph_hw_clock, hw)
+
+/*
+ * mpfs_clk_lock prevents anything else from writing to the
+ * mpfs clk block while a software locked register is being written.
+ */
+static DEFINE_SPINLOCK(mpfs_clk_lock);
+
+static const struct clk_parent_data mpfs_cfg_parent[] = {
+       { .index = 0 },
+};
+
+static const struct clk_div_table mpfs_div_cpu_axi_table[] = {
+       { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+       { 0, 0 }
+};
+
+static const struct clk_div_table mpfs_div_ahb_table[] = {
+       { 1, 2 }, { 2, 4}, { 3, 8 },
+       { 0, 0 }
+};
+
+static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+       struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+       struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+       void __iomem *base_addr = cfg_hw->sys_base;
+       u32 val;
+
+       val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR) >> cfg->shift;
+       val &= clk_div_mask(cfg->width);
+
+       return prate / (1u << val);
+}
+
+static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+{
+       struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+       struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+
+       return divider_round_rate(hw, rate, prate, cfg->table, cfg->width, 0);
+}
+
+static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
+{
+       struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+       struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+       void __iomem *base_addr = cfg_hw->sys_base;
+       unsigned long flags;
+       u32 val;
+       int divider_setting;
+
+       divider_setting = divider_get_val(rate, prate, cfg->table, cfg->width, 0);
+
+       if (divider_setting < 0)
+               return divider_setting;
+
+       spin_lock_irqsave(&mpfs_clk_lock, flags);
+
+       val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR);
+       val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift);
+       val |= divider_setting << cfg->shift;
+       writel_relaxed(val, base_addr + REG_CLOCK_CONFIG_CR);
+
+       spin_unlock_irqrestore(&mpfs_clk_lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops mpfs_clk_cfg_ops = {
+       .recalc_rate = mpfs_cfg_clk_recalc_rate,
+       .round_rate = mpfs_cfg_clk_round_rate,
+       .set_rate = mpfs_cfg_clk_set_rate,
+};
+
+#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags) {         \
+       .cfg.id = _id,                                                          \
+       .cfg.shift = _shift,                                                    \
+       .cfg.width = _width,                                                    \
+       .cfg.table = _table,                                                    \
+       .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_cfg_ops,  \
+                                           _flags),                            \
+}
+
+static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
+       CLK_CFG(CLK_CPU, "clk_cpu", mpfs_cfg_parent, 0, 2, mpfs_div_cpu_axi_table, 0),
+       CLK_CFG(CLK_AXI, "clk_axi", mpfs_cfg_parent, 2, 2, mpfs_div_cpu_axi_table, 0),
+       CLK_CFG(CLK_AHB, "clk_ahb", mpfs_cfg_parent, 4, 2, mpfs_div_ahb_table, 0),
+};
+
+static int mpfs_clk_register_cfg(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hw,
+                                void __iomem *sys_base)
+{
+       cfg_hw->sys_base = sys_base;
+
+       return devm_clk_hw_register(dev, &cfg_hw->hw);
+}
+
+static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hws,
+                                 unsigned int num_clks, struct mpfs_clock_data *data)
+{
+       void __iomem *sys_base = data->base;
+       unsigned int i, id;
+       int ret;
+
+       for (i = 0; i < num_clks; i++) {
+               struct mpfs_cfg_hw_clock *cfg_hw = &cfg_hws[i];
+
+               ret = mpfs_clk_register_cfg(dev, cfg_hw, sys_base);
+               if (ret)
+                       return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
+                                            cfg_hw->cfg.id);
+
+               id = cfg_hws[i].cfg.id;
+               data->hw_data.hws[id] = &cfg_hw->hw;
+       }
+
+       return 0;
+}
+
+static int mpfs_periph_clk_enable(struct clk_hw *hw)
+{
+       struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+       struct mpfs_periph_clock *periph = &periph_hw->periph;
+       void __iomem *base_addr = periph_hw->sys_base;
+       u32 reg, val;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mpfs_clk_lock, flags);
+
+       reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
+       val = reg & ~(1u << periph->shift);
+       writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR);
+
+       reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
+       val = reg | (1u << periph->shift);
+       writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
+
+       spin_unlock_irqrestore(&mpfs_clk_lock, flags);
+
+       return 0;
+}
+
+static void mpfs_periph_clk_disable(struct clk_hw *hw)
+{
+       struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+       struct mpfs_periph_clock *periph = &periph_hw->periph;
+       void __iomem *base_addr = periph_hw->sys_base;
+       u32 reg, val;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mpfs_clk_lock, flags);
+
+       reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
+       val = reg | (1u << periph->shift);
+       writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR);
+
+       reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
+       val = reg & ~(1u << periph->shift);
+       writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
+
+       spin_unlock_irqrestore(&mpfs_clk_lock, flags);
+}
+
+static int mpfs_periph_clk_is_enabled(struct clk_hw *hw)
+{
+       struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+       struct mpfs_periph_clock *periph = &periph_hw->periph;
+       void __iomem *base_addr = periph_hw->sys_base;
+       u32 reg;
+
+       reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
+       if ((reg & (1u << periph->shift)) == 0u) {
+               reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
+               if (reg & (1u << periph->shift))
+                       return 1;
+       }
+
+       return 0;
+}
+
+static const struct clk_ops mpfs_periph_clk_ops = {
+       .enable = mpfs_periph_clk_enable,
+       .disable = mpfs_periph_clk_disable,
+       .is_enabled = mpfs_periph_clk_is_enabled,
+};
+
+#define CLK_PERIPH(_id, _name, _parent, _shift, _flags) {                      \
+       .periph.id = _id,                                                       \
+       .periph.shift = _shift,                                                 \
+       .hw.init = CLK_HW_INIT_HW(_name, _parent, &mpfs_periph_clk_ops,         \
+                                 _flags),                                      \
+}
+
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT].hw)
+
+/*
+ * Critical clocks:
+ * - CLK_ENVM: reserved by hart software services (hss) superloop monitor/m mode interrupt
+ *   trap handler
+ * - CLK_MMUART0: reserved by the hss
+ * - CLK_DDRC: provides clock to the ddr subsystem
+ * - CLK_FICx: these provide clocks for sections of the fpga fabric, disabling them would
+ *   cause the fabric to go into reset
+ */
+
+static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
+       CLK_PERIPH(CLK_ENVM, "clk_periph_envm", PARENT_CLK(AHB), 0, CLK_IS_CRITICAL),
+       CLK_PERIPH(CLK_MAC0, "clk_periph_mac0", PARENT_CLK(AHB), 1, 0),
+       CLK_PERIPH(CLK_MAC1, "clk_periph_mac1", PARENT_CLK(AHB), 2, 0),
+       CLK_PERIPH(CLK_MMC, "clk_periph_mmc", PARENT_CLK(AHB), 3, 0),
+       CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(AHB), 4, 0),
+       CLK_PERIPH(CLK_MMUART0, "clk_periph_mmuart0", PARENT_CLK(AHB), 5, CLK_IS_CRITICAL),
+       CLK_PERIPH(CLK_MMUART1, "clk_periph_mmuart1", PARENT_CLK(AHB), 6, 0),
+       CLK_PERIPH(CLK_MMUART2, "clk_periph_mmuart2", PARENT_CLK(AHB), 7, 0),
+       CLK_PERIPH(CLK_MMUART3, "clk_periph_mmuart3", PARENT_CLK(AHB), 8, 0),
+       CLK_PERIPH(CLK_MMUART4, "clk_periph_mmuart4", PARENT_CLK(AHB), 9, 0),
+       CLK_PERIPH(CLK_SPI0, "clk_periph_spi0", PARENT_CLK(AHB), 10, 0),
+       CLK_PERIPH(CLK_SPI1, "clk_periph_spi1", PARENT_CLK(AHB), 11, 0),
+       CLK_PERIPH(CLK_I2C0, "clk_periph_i2c0", PARENT_CLK(AHB), 12, 0),
+       CLK_PERIPH(CLK_I2C1, "clk_periph_i2c1", PARENT_CLK(AHB), 13, 0),
+       CLK_PERIPH(CLK_CAN0, "clk_periph_can0", PARENT_CLK(AHB), 14, 0),
+       CLK_PERIPH(CLK_CAN1, "clk_periph_can1", PARENT_CLK(AHB), 15, 0),
+       CLK_PERIPH(CLK_USB, "clk_periph_usb", PARENT_CLK(AHB), 16, 0),
+       CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, 0),
+       CLK_PERIPH(CLK_QSPI, "clk_periph_qspi", PARENT_CLK(AHB), 19, 0),
+       CLK_PERIPH(CLK_GPIO0, "clk_periph_gpio0", PARENT_CLK(AHB), 20, 0),
+       CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0),
+       CLK_PERIPH(CLK_GPIO2, "clk_periph_gpio2", PARENT_CLK(AHB), 22, 0),
+       CLK_PERIPH(CLK_DDRC, "clk_periph_ddrc", PARENT_CLK(AHB), 23, CLK_IS_CRITICAL),
+       CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AHB), 24, CLK_IS_CRITICAL),
+       CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AHB), 25, CLK_IS_CRITICAL),
+       CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AHB), 26, CLK_IS_CRITICAL),
+       CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AHB), 27, CLK_IS_CRITICAL),
+       CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AHB), 28, 0),
+       CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0),
+};
+
+static int mpfs_clk_register_periph(struct device *dev, struct mpfs_periph_hw_clock *periph_hw,
+                                   void __iomem *sys_base)
+{
+       periph_hw->sys_base = sys_base;
+
+       return devm_clk_hw_register(dev, &periph_hw->hw);
+}
+
+static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_clock *periph_hws,
+                                    int num_clks, struct mpfs_clock_data *data)
+{
+       void __iomem *sys_base = data->base;
+       unsigned int i, id;
+       int ret;
+
+       for (i = 0; i < num_clks; i++) {
+               struct mpfs_periph_hw_clock *periph_hw = &periph_hws[i];
+
+               ret = mpfs_clk_register_periph(dev, periph_hw, sys_base);
+               if (ret)
+                       return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
+                                            periph_hw->periph.id);
+
+               id = periph_hws[i].periph.id;
+               data->hw_data.hws[id] = &periph_hw->hw;
+       }
+
+       return 0;
+}
+
+static int mpfs_clk_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mpfs_clock_data *clk_data;
+       unsigned int num_clks;
+       int ret;
+
+       /* CLK_RESERVED is not part of cfg_clks nor periph_clks, so add 1 */
+       num_clks = ARRAY_SIZE(mpfs_cfg_clks) + ARRAY_SIZE(mpfs_periph_clks) + 1;
+
+       clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws, num_clks), GFP_KERNEL);
+       if (!clk_data)
+               return -ENOMEM;
+
+       clk_data->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(clk_data->base))
+               return PTR_ERR(clk_data->base);
+
+       clk_data->hw_data.num = num_clks;
+
+       ret = mpfs_clk_register_cfgs(dev, mpfs_cfg_clks, ARRAY_SIZE(mpfs_cfg_clks), clk_data);
+       if (ret)
+               return ret;
+
+       ret = mpfs_clk_register_periphs(dev, mpfs_periph_clks, ARRAY_SIZE(mpfs_periph_clks),
+                                       clk_data);
+       if (ret)
+               return ret;
+
+       ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clk_data->hw_data);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+static const struct of_device_id mpfs_clk_of_match_table[] = {
+       { .compatible = "microchip,mpfs-clkcfg", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, mpfs_clk_of_match_table);
+
+static struct platform_driver mpfs_clk_driver = {
+       .probe = mpfs_clk_probe,
+       .driver = {
+               .name = "microchip-mpfs-clkcfg",
+               .of_match_table = mpfs_clk_of_match_table,
+       },
+};
+
+static int __init clk_mpfs_init(void)
+{
+       return platform_driver_register(&mpfs_clk_driver);
+}
+core_initcall(clk_mpfs_init);
+
+static void __exit clk_mpfs_exit(void)
+{
+       platform_driver_unregister(&mpfs_clk_driver);
+}
+module_exit(clk_mpfs_exit);
+
+MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
+MODULE_LICENSE("GPL v2");
index 0839fb2..50a7802 100644 (file)
@@ -317,9 +317,9 @@ static const char * const ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
 
 static DEFINE_SPINLOCK(gpu_lock);
 static const char * const mmp2_gpu_gc_parent_names[] =  {"pll1_2", "pll1_3", "pll2_2", "pll2_3", "pll2", "usb_pll"};
-static u32 mmp2_gpu_gc_parent_table[] =          { 0x0000,   0x0040,   0x0080,   0x00c0,   0x1000, 0x1040   };
+static const u32 mmp2_gpu_gc_parent_table[] = { 0x0000,   0x0040,   0x0080,   0x00c0,   0x1000, 0x1040   };
 static const char * const mmp2_gpu_bus_parent_names[] = {"pll1_4", "pll2",   "pll2_2", "usb_pll"};
-static u32 mmp2_gpu_bus_parent_table[] =         { 0x0000,   0x0020,   0x0030,   0x4020   };
+static const u32 mmp2_gpu_bus_parent_table[] = { 0x0000,   0x0020,   0x0030,   0x4020   };
 static const char * const mmp3_gpu_bus_parent_names[] = {"pll1_4", "pll1_6", "pll1_2", "pll2_2"};
 static const char * const mmp3_gpu_gc_parent_names[] =  {"pll1",   "pll2",   "pll1_p", "pll2_p"};
 
index ab57c0e..edaa243 100644 (file)
@@ -76,7 +76,7 @@ static int mmp_pm_domain_power_off(struct generic_pm_domain *genpd)
        if (pm_domain->lock)
                spin_lock_irqsave(pm_domain->lock, flags);
 
-       /* Turn off and isolate the the power island. */
+       /* Turn off and isolate the power island. */
        val = readl(pm_domain->reg);
        val &= ~pm_domain->power_on;
        val &= ~0x100;
index 32ac6b6..e3777ca 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/jiffies.h>
 
 #define TBG_SEL                0x0
 #define DIV_SEL0       0x4
@@ -541,7 +542,7 @@ static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
         * We are going to L0 with rate >= 1GHz. Check whether we have been at
         * L1 for long enough time. If not, go to L1 for 20ms.
         */
-       if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
+       if (pm_cpu->l1_expiration && time_is_before_eq_jiffies(pm_cpu->l1_expiration))
                goto invalidate_l1_exp;
 
        regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
index 8b686da..c23ac46 100644 (file)
@@ -457,9 +457,8 @@ static unsigned long lpc18xx_pll1_recalc_rate(struct clk_hw *hw,
        struct lpc18xx_pll *pll = to_lpc_pll(hw);
        u16 msel, nsel, psel;
        bool direct, fbsel;
-       u32 stat, ctrl;
+       u32 ctrl;
 
-       stat = readl(pll->reg + LPC18XX_CGU_PLL1_STAT);
        ctrl = readl(pll->reg + LPC18XX_CGU_PLL1_CTRL);
 
        direct = (ctrl & LPC18XX_PLL1_CTRL_DIRECT) ? true : false;
@@ -523,7 +522,7 @@ static struct lpc18xx_cgu_pll_clk lpc18xx_cgu_src_clk_plls[] = {
        LPC1XX_CGU_CLK_PLL(PLL1,        pll1_src_ids, pll1_ops),
 };
 
-static void lpc18xx_fill_parent_names(const char **parent, u32 *id, int size)
+static void lpc18xx_fill_parent_names(const char **parent, const u32 *id, int size)
 {
        int i;
 
index 76f492c..2a6d583 100644 (file)
@@ -154,7 +154,7 @@ static struct pistachio_pll pistachio_plls[] __initdata = {
 PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux",
                     "rpu_l_pll_mux", "sys_pll_mux",
                     "wifi_pll_mux", "bt_pll_mux" };
-static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
+static const u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
 
 static unsigned int pistachio_critical_clks_core[] __initdata = {
        CLK_MIPS
index 42c8741..d01436b 100644 (file)
@@ -29,11 +29,11 @@ config QCOM_A53PLL
          devices.
 
 config QCOM_A7PLL
-       tristate "SDX55 A7 PLL"
+       tristate "A7 PLL driver for SDX55 and SDX65"
        help
-         Support for the A7 PLL on SDX55 devices. It provides the CPU with
+         Support for the A7 PLL on SDX55 and SDX65 devices. It provides the CPU with
          frequencies above 1GHz.
-         Say Y if you want to support higher CPU frequencies on SDX55
+         Say Y if you want to support higher CPU frequencies on SDX55 and SDX65
          devices.
 
 config QCOM_CLK_APCS_MSM8916
@@ -55,13 +55,13 @@ config QCOM_CLK_APCC_MSM8996
          drivers for dynamic power management.
 
 config QCOM_CLK_APCS_SDX55
-       tristate "SDX55 APCS Clock Controller"
+       tristate "SDX55 and SDX65 APCS Clock Controller"
        depends on QCOM_APCS_IPC || COMPILE_TEST
        help
-         Support for the APCS Clock Controller on SDX55 platform. The
+         Support for the APCS Clock Controller on SDX55, SDX65 platforms. The
          APCS is managing the mux and divider which feeds the CPUs.
          Say Y if you want to support CPU frequency scaling on devices
-         such as SDX55.
+         such as SDX55, SDX65.
 
 config QCOM_CLK_RPM
        tristate "RPM based Clock Controller"
@@ -340,6 +340,15 @@ config QCM_GCC_2290
          Say Y if you want to use multimedia devices or peripheral
          devices such as UART, SPI, I2C, USB, SD/eMMC etc.
 
+config QCM_DISPCC_2290
+       tristate "QCM2290 Display Clock Controller"
+       select QCM_GCC_2290
+       help
+         Support for the display clock controller on Qualcomm Technologies, Inc
+         QCM2290 devices.
+         Say Y if you want to support display devices and functionality such as
+         splash screen.
+
 config QCS_GCC_404
        tristate "QCS404 Global Clock Controller"
        help
@@ -565,6 +574,14 @@ config SDX_GCC_55
          Say Y if you want to use peripheral devices such as UART,
          SPI, I2C, USB, SD/UFS, PCIe etc.
 
+config SDX_GCC_65
+       tristate "SDX65 Global Clock Controller"
+       select QCOM_GDSC
+       help
+         Support for the global clock controller on SDX65 devices.
+         Say Y if you want to use peripheral devices such as UART,
+         SPI, I2C, USB, SD/UFS, PCIe etc.
+
 config SM_CAMCC_8250
        tristate "SM8250 Camera Clock Controller"
        select SM_GCC_8250
@@ -572,13 +589,14 @@ config SM_CAMCC_8250
          Support for the camera clock controller on SM8250 devices.
          Say Y if you want to support camera devices and camera functionality.
 
-config SDX_GCC_65
-       tristate "SDX65 Global Clock Controller"
-       select QCOM_GDSC
+config SM_DISPCC_6125
+       tristate "SM6125 Display Clock Controller"
+       depends on SM_GCC_6125
        help
-         Support for the global clock controller on SDX65 devices.
-         Say Y if you want to use peripheral devices such as UART,
-         SPI, I2C, USB, SD/UFS, PCIe etc.
+         Support for the display clock controller on Qualcomm Technologies, Inc
+         SM6125 devices.
+         Say Y if you want to support display devices and functionality such as
+         splash screen.
 
 config SM_DISPCC_8250
        tristate "SM8150 and SM8250 Display Clock Controller"
@@ -589,6 +607,15 @@ config SM_DISPCC_8250
          Say Y if you want to support display devices and functionality such as
          splash screen.
 
+config SM_DISPCC_6350
+       tristate "SM6350 Display Clock Controller"
+       depends on SM_GCC_6350
+       help
+         Support for the display clock controller on Qualcomm Technologies, Inc
+         SM6350 devices.
+         Say Y if you want to support display devices and functionality such as
+         splash screen.
+
 config SM_GCC_6115
        tristate "SM6115 and SM4250 Global Clock Controller"
        help
@@ -642,6 +669,14 @@ config SM_GCC_8450
          Say Y if you want to use peripheral devices such as UART,
          SPI, I2C, USB, SD/UFS, PCIe etc.
 
+config SM_GPUCC_6350
+       tristate "SM6350 Graphics Clock Controller"
+       select SM_GCC_6350
+       help
+         Support for the graphics clock controller on SM6350 devices.
+         Say Y if you want to support graphics controller devices and
+         functionality such as 3D graphics.
+
 config SM_GPUCC_8150
        tristate "SM8150 Graphics Clock Controller"
        select SM_GCC_8150
index 0d98ca9..671cf58 100644 (file)
@@ -56,6 +56,7 @@ obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
 obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
 obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
 obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
+obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
 obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
 obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
 obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
@@ -83,8 +84,10 @@ obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
 obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
 obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
-obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
 obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
+obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
+obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
+obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
 obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
 obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
 obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -93,6 +96,7 @@ obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
 obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
 obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
 obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
+obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
 obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
 obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
 obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
index ce73ee9..e2b4804 100644 (file)
@@ -29,7 +29,6 @@ enum {
        P_CAM_CC_PLL2_OUT_AUX,
        P_CAM_CC_PLL2_OUT_EARLY,
        P_CAM_CC_PLL3_OUT_MAIN,
-       P_CORE_BI_PLL_TEST_SE,
 };
 
 static const struct pll_vco agera_vco[] = {
@@ -127,7 +126,9 @@ static struct clk_fixed_factor cam_cc_pll2_out_early = {
        .div = 2,
        .hw.init = &(struct clk_init_data){
                .name = "cam_cc_pll2_out_early",
-               .parent_names = (const char *[]){ "cam_cc_pll2" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &cam_cc_pll2.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
        },
@@ -147,8 +148,8 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_pll2_out_aux",
-               .parent_data = &(const struct clk_parent_data){
-                       .hw = &cam_cc_pll2.clkr.hw,
+               .parent_hws = (const struct clk_hw*[]){
+                       &cam_cc_pll2.clkr.hw,
                },
                .num_parents = 1,
                .flags = CLK_SET_RATE_PARENT,
@@ -187,26 +188,22 @@ static const struct parent_map cam_cc_parent_map_0[] = {
        { P_BI_TCXO, 0 },
        { P_CAM_CC_PLL1_OUT_EVEN, 2 },
        { P_CAM_CC_PLL0_OUT_EVEN, 6 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
 static const struct clk_parent_data cam_cc_parent_data_0[] = {
        { .fw_name = "bi_tcxo" },
        { .hw = &cam_cc_pll1.clkr.hw },
        { .hw = &cam_cc_pll0.clkr.hw },
-       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
 };
 
 static const struct parent_map cam_cc_parent_map_1[] = {
        { P_BI_TCXO, 0 },
        { P_CAM_CC_PLL2_OUT_AUX, 1 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
 static const struct clk_parent_data cam_cc_parent_data_1[] = {
        { .fw_name = "bi_tcxo" },
        { .hw = &cam_cc_pll2_out_aux.clkr.hw },
-       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
 };
 
 static const struct parent_map cam_cc_parent_map_2[] = {
@@ -214,7 +211,6 @@ static const struct parent_map cam_cc_parent_map_2[] = {
        { P_CAM_CC_PLL2_OUT_EARLY, 4 },
        { P_CAM_CC_PLL3_OUT_MAIN, 5 },
        { P_CAM_CC_PLL0_OUT_EVEN, 6 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
 static const struct clk_parent_data cam_cc_parent_data_2[] = {
@@ -222,7 +218,6 @@ static const struct clk_parent_data cam_cc_parent_data_2[] = {
        { .hw = &cam_cc_pll2_out_early.hw },
        { .hw = &cam_cc_pll3.clkr.hw },
        { .hw = &cam_cc_pll0.clkr.hw },
-       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
 };
 
 static const struct parent_map cam_cc_parent_map_3[] = {
@@ -231,7 +226,6 @@ static const struct parent_map cam_cc_parent_map_3[] = {
        { P_CAM_CC_PLL2_OUT_EARLY, 4 },
        { P_CAM_CC_PLL3_OUT_MAIN, 5 },
        { P_CAM_CC_PLL0_OUT_EVEN, 6 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
 static const struct clk_parent_data cam_cc_parent_data_3[] = {
@@ -240,33 +234,28 @@ static const struct clk_parent_data cam_cc_parent_data_3[] = {
        { .hw = &cam_cc_pll2_out_early.hw },
        { .hw = &cam_cc_pll3.clkr.hw },
        { .hw = &cam_cc_pll0.clkr.hw },
-       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
 };
 
 static const struct parent_map cam_cc_parent_map_4[] = {
        { P_BI_TCXO, 0 },
        { P_CAM_CC_PLL3_OUT_MAIN, 5 },
        { P_CAM_CC_PLL0_OUT_EVEN, 6 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
 static const struct clk_parent_data cam_cc_parent_data_4[] = {
        { .fw_name = "bi_tcxo" },
        { .hw = &cam_cc_pll3.clkr.hw },
        { .hw = &cam_cc_pll0.clkr.hw },
-       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
 };
 
 static const struct parent_map cam_cc_parent_map_5[] = {
        { P_BI_TCXO, 0 },
        { P_CAM_CC_PLL0_OUT_EVEN, 6 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
 static const struct clk_parent_data cam_cc_parent_data_5[] = {
        { .fw_name = "bi_tcxo" },
        { .hw = &cam_cc_pll0.clkr.hw },
-       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
 };
 
 static const struct parent_map cam_cc_parent_map_6[] = {
@@ -274,7 +263,6 @@ static const struct parent_map cam_cc_parent_map_6[] = {
        { P_CAM_CC_PLL1_OUT_EVEN, 2 },
        { P_CAM_CC_PLL3_OUT_MAIN, 5 },
        { P_CAM_CC_PLL0_OUT_EVEN, 6 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
 static const struct clk_parent_data cam_cc_parent_data_6[] = {
@@ -282,7 +270,6 @@ static const struct clk_parent_data cam_cc_parent_data_6[] = {
        { .hw = &cam_cc_pll1.clkr.hw },
        { .hw = &cam_cc_pll3.clkr.hw },
        { .hw = &cam_cc_pll0.clkr.hw },
-       { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
 };
 
 static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
@@ -303,7 +290,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_bps_clk_src",
                .parent_data = cam_cc_parent_data_2,
-               .num_parents = 5,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -324,7 +311,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_cci_0_clk_src",
                .parent_data = cam_cc_parent_data_5,
-               .num_parents = 3,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -338,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_cci_1_clk_src",
                .parent_data = cam_cc_parent_data_5,
-               .num_parents = 3,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -359,7 +346,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_cphy_rx_clk_src",
                .parent_data = cam_cc_parent_data_3,
-               .num_parents = 6,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -378,7 +365,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi0phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -392,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi1phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -406,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi2phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -420,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi3phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -442,7 +429,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_fast_ahb_clk_src",
                .parent_data = cam_cc_parent_data_0,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -465,7 +452,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_icp_clk_src",
                .parent_data = cam_cc_parent_data_2,
-               .num_parents = 5,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -487,7 +474,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_0_clk_src",
                .parent_data = cam_cc_parent_data_4,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -509,7 +496,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_0_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
-               .num_parents = 6,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -523,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_1_clk_src",
                .parent_data = cam_cc_parent_data_4,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -537,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_1_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
-               .num_parents = 6,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -551,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_lite_clk_src",
                .parent_data = cam_cc_parent_data_4,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -566,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_lite_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
-               .num_parents = 6,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -589,7 +576,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ipe_0_clk_src",
                .parent_data = cam_cc_parent_data_2,
-               .num_parents = 5,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -612,7 +599,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_jpeg_clk_src",
                .parent_data = cam_cc_parent_data_2,
-               .num_parents = 5,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -634,7 +621,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_lrme_clk_src",
                .parent_data = cam_cc_parent_data_6,
-               .num_parents = 5,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -655,7 +642,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk0_clk_src",
                .parent_data = cam_cc_parent_data_1,
-               .num_parents = 3,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -669,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk1_clk_src",
                .parent_data = cam_cc_parent_data_1,
-               .num_parents = 3,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -683,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk2_clk_src",
                .parent_data = cam_cc_parent_data_1,
-               .num_parents = 3,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -697,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk3_clk_src",
                .parent_data = cam_cc_parent_data_1,
-               .num_parents = 3,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -711,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk4_clk_src",
                .parent_data = cam_cc_parent_data_1,
-               .num_parents = 3,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -730,7 +717,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_slow_ahb_clk_src",
                .parent_data = cam_cc_parent_data_0,
-               .num_parents = 4,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -744,8 +731,8 @@ static struct clk_branch cam_cc_bps_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_bps_ahb_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -762,8 +749,8 @@ static struct clk_branch cam_cc_bps_areg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_bps_areg_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_fast_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -793,8 +780,8 @@ static struct clk_branch cam_cc_bps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_bps_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_bps_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_bps_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -824,8 +811,8 @@ static struct clk_branch cam_cc_cci_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_cci_0_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cci_0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -842,8 +829,8 @@ static struct clk_branch cam_cc_cci_1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_cci_1_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cci_1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -860,8 +847,8 @@ static struct clk_branch cam_cc_core_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_core_ahb_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -878,8 +865,8 @@ static struct clk_branch cam_cc_cpas_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_cpas_ahb_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -896,8 +883,8 @@ static struct clk_branch cam_cc_csi0phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi0phytimer_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi0phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -914,8 +901,8 @@ static struct clk_branch cam_cc_csi1phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi1phytimer_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi1phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -932,8 +919,8 @@ static struct clk_branch cam_cc_csi2phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi2phytimer_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi2phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -950,8 +937,8 @@ static struct clk_branch cam_cc_csi3phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi3phytimer_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi3phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -968,8 +955,8 @@ static struct clk_branch cam_cc_csiphy0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy0_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -986,8 +973,8 @@ static struct clk_branch cam_cc_csiphy1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy1_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1004,8 +991,8 @@ static struct clk_branch cam_cc_csiphy2_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy2_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1022,8 +1009,8 @@ static struct clk_branch cam_cc_csiphy3_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy3_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1040,8 +1027,8 @@ static struct clk_branch cam_cc_icp_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_icp_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_icp_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_icp_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1071,8 +1058,8 @@ static struct clk_branch cam_cc_ife_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1089,8 +1076,8 @@ static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_cphy_rx_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1107,8 +1094,8 @@ static struct clk_branch cam_cc_ife_0_csid_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_csid_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_0_csid_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_0_csid_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1125,8 +1112,8 @@ static struct clk_branch cam_cc_ife_0_dsp_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_dsp_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1156,8 +1143,8 @@ static struct clk_branch cam_cc_ife_1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1174,8 +1161,8 @@ static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_cphy_rx_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1192,8 +1179,8 @@ static struct clk_branch cam_cc_ife_1_csid_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_csid_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_1_csid_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_1_csid_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1210,8 +1197,8 @@ static struct clk_branch cam_cc_ife_1_dsp_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_dsp_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1228,8 +1215,8 @@ static struct clk_branch cam_cc_ife_lite_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_lite_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_lite_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1246,8 +1233,8 @@ static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_lite_cphy_rx_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1264,8 +1251,8 @@ static struct clk_branch cam_cc_ife_lite_csid_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_lite_csid_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_lite_csid_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1282,8 +1269,8 @@ static struct clk_branch cam_cc_ipe_0_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_0_ahb_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1300,8 +1287,8 @@ static struct clk_branch cam_cc_ipe_0_areg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_0_areg_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_fast_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1331,8 +1318,8 @@ static struct clk_branch cam_cc_ipe_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_0_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_ipe_0_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ipe_0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1349,8 +1336,8 @@ static struct clk_branch cam_cc_jpeg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_jpeg_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_jpeg_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1367,8 +1354,8 @@ static struct clk_branch cam_cc_lrme_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_lrme_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_lrme_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_lrme_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1385,8 +1372,8 @@ static struct clk_branch cam_cc_mclk0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk0_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1403,8 +1390,8 @@ static struct clk_branch cam_cc_mclk1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk1_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1421,8 +1408,8 @@ static struct clk_branch cam_cc_mclk2_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk2_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk2_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1439,8 +1426,8 @@ static struct clk_branch cam_cc_mclk3_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk3_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk3_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1457,8 +1444,8 @@ static struct clk_branch cam_cc_mclk4_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk4_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk4_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
index 1b2cefe..be3f953 100644 (file)
@@ -23,25 +23,6 @@ enum {
        P_CAM_CC_PLL1_OUT_EVEN,
        P_CAM_CC_PLL2_OUT_EVEN,
        P_CAM_CC_PLL3_OUT_EVEN,
-       P_CORE_BI_PLL_TEST_SE,
-};
-
-static const struct parent_map cam_cc_parent_map_0[] = {
-       { P_BI_TCXO, 0 },
-       { P_CAM_CC_PLL2_OUT_EVEN, 1 },
-       { P_CAM_CC_PLL1_OUT_EVEN, 2 },
-       { P_CAM_CC_PLL3_OUT_EVEN, 5 },
-       { P_CAM_CC_PLL0_OUT_EVEN, 6 },
-       { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const cam_cc_parent_names_0[] = {
-       "bi_tcxo",
-       "cam_cc_pll2_out_even",
-       "cam_cc_pll1_out_even",
-       "cam_cc_pll3_out_even",
-       "cam_cc_pll0_out_even",
-       "core_bi_pll_test_se",
 };
 
 static struct clk_alpha_pll cam_cc_pll0 = {
@@ -50,7 +31,9 @@ static struct clk_alpha_pll cam_cc_pll0 = {
        .clkr = {
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_pll0",
-                       .parent_names = (const char *[]){ "bi_tcxo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo", .name = "bi_tcxo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_fabia_ops,
                },
@@ -72,7 +55,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_pll0_out_even",
-               .parent_names = (const char *[]){ "cam_cc_pll0" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &cam_cc_pll0.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_alpha_pll_postdiv_fabia_ops,
        },
@@ -84,7 +69,9 @@ static struct clk_alpha_pll cam_cc_pll1 = {
        .clkr = {
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_pll1",
-                       .parent_names = (const char *[]){ "bi_tcxo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo", .name = "bi_tcxo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_fabia_ops,
                },
@@ -100,7 +87,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_pll1_out_even",
-               .parent_names = (const char *[]){ "cam_cc_pll1" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &cam_cc_pll1.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_alpha_pll_postdiv_fabia_ops,
        },
@@ -112,7 +101,9 @@ static struct clk_alpha_pll cam_cc_pll2 = {
        .clkr = {
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_pll2",
-                       .parent_names = (const char *[]){ "bi_tcxo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo", .name = "bi_tcxo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_fabia_ops,
                },
@@ -128,7 +119,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = {
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_pll2_out_even",
-               .parent_names = (const char *[]){ "cam_cc_pll2" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &cam_cc_pll2.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_alpha_pll_postdiv_fabia_ops,
        },
@@ -140,7 +133,9 @@ static struct clk_alpha_pll cam_cc_pll3 = {
        .clkr = {
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_pll3",
-                       .parent_names = (const char *[]){ "bi_tcxo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo", .name = "bi_tcxo",
+                       },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_fabia_ops,
                },
@@ -156,12 +151,30 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_pll3_out_even",
-               .parent_names = (const char *[]){ "cam_cc_pll3" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &cam_cc_pll3.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_alpha_pll_postdiv_fabia_ops,
        },
 };
 
+static const struct parent_map cam_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_CAM_CC_PLL2_OUT_EVEN, 1 },
+       { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+       { P_CAM_CC_PLL3_OUT_EVEN, 5 },
+       { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+       { .hw = &cam_cc_pll2_out_even.clkr.hw },
+       { .hw = &cam_cc_pll1_out_even.clkr.hw },
+       { .hw = &cam_cc_pll3_out_even.clkr.hw },
+       { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
 static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
        F(19200000, P_BI_TCXO, 1, 0, 0),
        F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
@@ -189,8 +202,8 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
        .freq_tbl = ftbl_cam_cc_bps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_bps_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -212,8 +225,8 @@ static struct clk_rcg2 cam_cc_cci_clk_src = {
        .freq_tbl = ftbl_cam_cc_cci_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_cci_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -232,8 +245,8 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
        .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_cphy_rx_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -253,8 +266,8 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
        .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi0phytimer_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -268,8 +281,8 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
        .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi1phytimer_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -283,8 +296,8 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
        .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi2phytimer_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -298,8 +311,8 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
        .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_csi3phytimer_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -323,8 +336,8 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
        .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_fast_ahb_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -346,8 +359,8 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
        .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_fd_core_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -369,8 +382,8 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
        .freq_tbl = ftbl_cam_cc_icp_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_icp_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -393,8 +406,8 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
        .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_0_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -416,8 +429,8 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
        .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_0_csid_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -430,8 +443,8 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
        .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_1_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -445,8 +458,8 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
        .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_1_csid_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -459,8 +472,8 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
        .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_lite_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -474,8 +487,8 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
        .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ife_lite_csid_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .ops = &clk_rcg2_shared_ops,
        },
 };
@@ -499,8 +512,8 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
        .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ipe_0_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -514,8 +527,8 @@ static struct clk_rcg2 cam_cc_ipe_1_clk_src = {
        .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_ipe_1_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -529,8 +542,8 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
        .freq_tbl = ftbl_cam_cc_bps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_jpeg_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -554,8 +567,8 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
        .freq_tbl = ftbl_cam_cc_lrme_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_lrme_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_shared_ops,
        },
@@ -577,8 +590,8 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
        .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk0_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -592,8 +605,8 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
        .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk1_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -607,8 +620,8 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
        .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk2_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -622,8 +635,8 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
        .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_mclk3_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -646,8 +659,8 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
        .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "cam_cc_slow_ahb_clk_src",
-               .parent_names = cam_cc_parent_names_0,
-               .num_parents = 6,
+               .parent_data = cam_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_rcg2_ops,
        },
@@ -661,8 +674,8 @@ static struct clk_branch cam_cc_bps_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_bps_ahb_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_slow_ahb_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -679,8 +692,8 @@ static struct clk_branch cam_cc_bps_areg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_bps_areg_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_fast_ahb_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_fast_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -710,8 +723,8 @@ static struct clk_branch cam_cc_bps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_bps_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_bps_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_bps_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -754,8 +767,8 @@ static struct clk_branch cam_cc_cci_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_cci_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cci_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cci_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -772,8 +785,8 @@ static struct clk_branch cam_cc_cpas_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_cpas_ahb_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_slow_ahb_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -790,8 +803,8 @@ static struct clk_branch cam_cc_csi0phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi0phytimer_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_csi0phytimer_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi0phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -808,8 +821,8 @@ static struct clk_branch cam_cc_csi1phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi1phytimer_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_csi1phytimer_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi1phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -826,8 +839,8 @@ static struct clk_branch cam_cc_csi2phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi2phytimer_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_csi2phytimer_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi2phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -844,8 +857,8 @@ static struct clk_branch cam_cc_csi3phytimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csi3phytimer_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_csi3phytimer_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_csi3phytimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -862,8 +875,8 @@ static struct clk_branch cam_cc_csiphy0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy0_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cphy_rx_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -880,8 +893,8 @@ static struct clk_branch cam_cc_csiphy1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy1_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cphy_rx_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -898,8 +911,8 @@ static struct clk_branch cam_cc_csiphy2_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy2_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cphy_rx_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -916,8 +929,8 @@ static struct clk_branch cam_cc_csiphy3_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_csiphy3_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cphy_rx_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -934,8 +947,8 @@ static struct clk_branch cam_cc_fd_core_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_fd_core_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_fd_core_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_fd_core_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -952,8 +965,8 @@ static struct clk_branch cam_cc_fd_core_uar_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_fd_core_uar_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_fd_core_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_fd_core_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
@@ -995,8 +1008,8 @@ static struct clk_branch cam_cc_icp_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_icp_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_icp_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_icp_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1052,8 +1065,8 @@ static struct clk_branch cam_cc_ife_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_0_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1070,8 +1083,8 @@ static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_cphy_rx_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cphy_rx_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1088,8 +1101,8 @@ static struct clk_branch cam_cc_ife_0_csid_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_csid_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_0_csid_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_0_csid_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1106,8 +1119,8 @@ static struct clk_branch cam_cc_ife_0_dsp_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_0_dsp_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_0_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
@@ -1136,8 +1149,8 @@ static struct clk_branch cam_cc_ife_1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_1_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1154,8 +1167,8 @@ static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_cphy_rx_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cphy_rx_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1172,8 +1185,8 @@ static struct clk_branch cam_cc_ife_1_csid_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_csid_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_1_csid_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_1_csid_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1190,8 +1203,8 @@ static struct clk_branch cam_cc_ife_1_dsp_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_1_dsp_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_1_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
@@ -1207,8 +1220,8 @@ static struct clk_branch cam_cc_ife_lite_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_lite_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_lite_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_lite_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1225,8 +1238,8 @@ static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_lite_cphy_rx_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_cphy_rx_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_cphy_rx_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1243,8 +1256,8 @@ static struct clk_branch cam_cc_ife_lite_csid_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ife_lite_csid_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ife_lite_csid_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ife_lite_csid_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1261,8 +1274,8 @@ static struct clk_branch cam_cc_ipe_0_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_0_ahb_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_slow_ahb_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1279,8 +1292,8 @@ static struct clk_branch cam_cc_ipe_0_areg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_0_areg_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_fast_ahb_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_fast_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1310,8 +1323,8 @@ static struct clk_branch cam_cc_ipe_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_0_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ipe_0_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ipe_0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1328,8 +1341,8 @@ static struct clk_branch cam_cc_ipe_1_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_1_ahb_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_slow_ahb_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_slow_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1346,8 +1359,8 @@ static struct clk_branch cam_cc_ipe_1_areg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_1_areg_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_fast_ahb_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_fast_ahb_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1377,8 +1390,8 @@ static struct clk_branch cam_cc_ipe_1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_ipe_1_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_ipe_1_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_ipe_1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1395,8 +1408,8 @@ static struct clk_branch cam_cc_jpeg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_jpeg_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_jpeg_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_jpeg_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1413,8 +1426,8 @@ static struct clk_branch cam_cc_lrme_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_lrme_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_lrme_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_lrme_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1431,8 +1444,8 @@ static struct clk_branch cam_cc_mclk0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk0_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_mclk0_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk0_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1449,8 +1462,8 @@ static struct clk_branch cam_cc_mclk1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk1_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_mclk1_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk1_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1467,8 +1480,8 @@ static struct clk_branch cam_cc_mclk2_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk2_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_mclk2_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk2_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1485,8 +1498,8 @@ static struct clk_branch cam_cc_mclk3_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "cam_cc_mclk3_clk",
-                       .parent_names = (const char *[]){
-                               "cam_cc_mclk3_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &cam_cc_mclk3_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
index a9d181d..88845ba 100644 (file)
@@ -526,6 +526,19 @@ static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
        return __clk_rcg_set_rate(rcg, f);
 }
 
+static int clk_rcg_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+                                 unsigned long parent_rate)
+{
+       struct clk_rcg *rcg = to_clk_rcg(hw);
+       const struct freq_tbl *f;
+
+       f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+       if (!f)
+               return -EINVAL;
+
+       return __clk_rcg_set_rate(rcg, f);
+}
+
 static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
 {
@@ -816,6 +829,17 @@ const struct clk_ops clk_rcg_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_rcg_ops);
 
+const struct clk_ops clk_rcg_floor_ops = {
+       .enable = clk_enable_regmap,
+       .disable = clk_disable_regmap,
+       .get_parent = clk_rcg_get_parent,
+       .set_parent = clk_rcg_set_parent,
+       .recalc_rate = clk_rcg_recalc_rate,
+       .determine_rate = clk_rcg_determine_rate,
+       .set_rate = clk_rcg_set_floor_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_floor_ops);
+
 const struct clk_ops clk_rcg_bypass_ops = {
        .enable = clk_enable_regmap,
        .disable = clk_disable_regmap,
index 99efcc7..00cea50 100644 (file)
@@ -86,6 +86,7 @@ struct clk_rcg {
 };
 
 extern const struct clk_ops clk_rcg_ops;
+extern const struct clk_ops clk_rcg_floor_ops;
 extern const struct clk_ops clk_rcg_bypass_ops;
 extern const struct clk_ops clk_rcg_bypass2_ops;
 extern const struct clk_ops clk_rcg_pixel_ops;
index e1b1b42..f675fd9 100644 (file)
@@ -264,7 +264,7 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
 
 static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
 {
-       u32 cfg, mask;
+       u32 cfg, mask, d_val, not2d_val, n_minus_m;
        struct clk_hw *hw = &rcg->clkr.hw;
        int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
 
@@ -283,8 +283,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
                if (ret)
                        return ret;
 
+               /* Calculate 2d value */
+               d_val = f->n;
+
+               n_minus_m = f->n - f->m;
+               n_minus_m *= 2;
+
+               d_val = clamp_t(u32, d_val, f->m, n_minus_m);
+               not2d_val = ~d_val & mask;
+
                ret = regmap_update_bits(rcg->clkr.regmap,
-                               RCG_D_OFFSET(rcg), mask, ~f->n);
+                               RCG_D_OFFSET(rcg), mask, not2d_val);
                if (ret)
                        return ret;
        }
@@ -720,6 +729,7 @@ static const struct frac_entry frac_table_pixel[] = {
        { 2, 9 },
        { 4, 9 },
        { 1, 1 },
+       { 2, 3 },
        { }
 };
 
index 74e57c8..aed9079 100644 (file)
@@ -512,6 +512,23 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
        .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
 };
 
+DEFINE_CLK_RPMH_VRM(sc8280xp, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+
+static struct clk_hw *sc8280xp_rpmh_clocks[] = {
+       [RPMH_CXO_CLK]          = &sdm845_bi_tcxo.hw,
+       [RPMH_CXO_CLK_A]        = &sdm845_bi_tcxo_ao.hw,
+       [RPMH_LN_BB_CLK3]       = &sc8280xp_ln_bb_clk3.hw,
+       [RPMH_LN_BB_CLK3_A]     = &sc8280xp_ln_bb_clk3_ao.hw,
+       [RPMH_IPA_CLK]          = &sdm845_ipa.hw,
+       [RPMH_PKA_CLK]          = &sm8350_pka.hw,
+       [RPMH_HWKM_CLK]         = &sm8350_hwkm.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sc8280xp = {
+       .clks = sc8280xp_rpmh_clocks,
+       .num_clks = ARRAY_SIZE(sc8280xp_rpmh_clocks),
+};
+
 /* Resource name must match resource id present in cmd-db */
 DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4);
 
@@ -691,6 +708,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
 static const struct of_device_id clk_rpmh_match_table[] = {
        { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
        { .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
+       { .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
        { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
        { .compatible = "qcom,sdx55-rpmh-clk",  .data = &clk_rpmh_sdx55},
        { .compatible = "qcom,sdx65-rpmh-clk",  .data = &clk_rpmh_sdx65},
index ea28e45..afc6dc9 100644 (file)
@@ -413,6 +413,7 @@ static const struct clk_ops clk_smd_rpm_branch_ops = {
        .recalc_rate    = clk_smd_rpm_recalc_rate,
 };
 
+DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
 DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
 DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
 DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
@@ -604,7 +605,11 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8, 19200000);
 DEFINE_CLK_SMD_RPM(msm8992, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
 DEFINE_CLK_SMD_RPM(msm8992, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1);
 
+DEFINE_CLK_SMD_RPM_BRANCH(msm8992, mss_cfg_ahb_clk, mss_cfg_ahb_a_clk,
+                         QCOM_SMD_RPM_MCFG_CLK, 0, 19200000);
 static struct clk_smd_rpm *msm8992_clks[] = {
+       [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+       [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
        [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
        [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk,
        [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk,
@@ -637,6 +642,8 @@ static struct clk_smd_rpm *msm8992_clks[] = {
        [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk,
        [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk,
        [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk,
+       [RPM_SMD_MSS_CFG_AHB_CLK] = &msm8992_mss_cfg_ahb_clk,
+       [RPM_SMD_MSS_CFG_AHB_A_CLK] = &msm8992_mss_cfg_ahb_a_clk,
        [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
        [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
        [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
@@ -661,6 +668,8 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8992 = {
 DEFINE_CLK_SMD_RPM(msm8994, ce3_clk, ce3_a_clk, QCOM_SMD_RPM_CE_CLK, 2);
 
 static struct clk_smd_rpm *msm8994_clks[] = {
+       [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+       [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
        [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
        [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk,
        [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk,
@@ -693,6 +702,8 @@ static struct clk_smd_rpm *msm8994_clks[] = {
        [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk,
        [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk,
        [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk,
+       [RPM_SMD_MSS_CFG_AHB_CLK] = &msm8992_mss_cfg_ahb_clk,
+       [RPM_SMD_MSS_CFG_AHB_A_CLK] = &msm8992_mss_cfg_ahb_a_clk,
        [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
        [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
        [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
@@ -805,15 +816,18 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
        .num_clks = ARRAY_SIZE(qcs404_clks),
 };
 
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin,
-                                    3, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk3, ln_bb_clk3_a, 3, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin, 3, 19200000);
 DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk,
                   QCOM_SMD_RPM_AGGR_CLK, 1);
 DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk,
                   QCOM_SMD_RPM_AGGR_CLK, 2);
 DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6, 19200000);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6, 19200000);
+
 static struct clk_smd_rpm *msm8998_clks[] = {
+       [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+       [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
        [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
        [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
        [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
@@ -826,12 +840,22 @@ static struct clk_smd_rpm *msm8998_clks[] = {
        [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
        [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1,
        [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1,
+       [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2,
+       [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2,
+       [RPM_SMD_DIV_CLK3] = &msm8992_div_clk3,
+       [RPM_SMD_DIV_A_CLK3] = &msm8992_div_clk3_a,
        [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
        [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
        [RPM_SMD_LN_BB_CLK1] = &msm8916_bb_clk1,
        [RPM_SMD_LN_BB_CLK1_A] = &msm8916_bb_clk1_a,
        [RPM_SMD_LN_BB_CLK2] = &msm8916_bb_clk2,
        [RPM_SMD_LN_BB_CLK2_A] = &msm8916_bb_clk2_a,
+       [RPM_SMD_LN_BB_CLK3] = &msm8998_ln_bb_clk3,
+       [RPM_SMD_LN_BB_CLK3_A] = &msm8998_ln_bb_clk3_a,
+       [RPM_SMD_LN_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+       [RPM_SMD_LN_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+       [RPM_SMD_LN_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+       [RPM_SMD_LN_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
        [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin,
        [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin,
        [RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
@@ -844,10 +868,14 @@ static struct clk_smd_rpm *msm8998_clks[] = {
        [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
        [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
        [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
-       [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
-       [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+       [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+       [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
        [RPM_SMD_RF_CLK3] = &msm8998_rf_clk3,
        [RPM_SMD_RF_CLK3_A] = &msm8998_rf_clk3_a,
+       [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+       [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+       [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+       [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
        [RPM_SMD_RF_CLK3_PIN] = &msm8998_rf_clk3_pin,
        [RPM_SMD_RF_CLK3_A_PIN] = &msm8998_rf_clk3_a_pin,
 };
@@ -857,11 +885,6 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
        .num_clks = ARRAY_SIZE(msm8998_clks),
 };
 
-DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0,
-                                                               19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3, 19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, ln_bb_clk3_pin_a, 3, 19200000);
-
 static struct clk_smd_rpm *sdm660_clks[] = {
        [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
        [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
@@ -891,16 +914,16 @@ static struct clk_smd_rpm *sdm660_clks[] = {
        [RPM_SMD_LN_BB_A_CLK] = &msm8916_bb_clk1_a,
        [RPM_SMD_LN_BB_CLK2] = &msm8916_bb_clk2,
        [RPM_SMD_LN_BB_CLK2_A] = &msm8916_bb_clk2_a,
-       [RPM_SMD_LN_BB_CLK3] = &sdm660_ln_bb_clk3,
-       [RPM_SMD_LN_BB_CLK3_A] = &sdm660_ln_bb_clk3_a,
+       [RPM_SMD_LN_BB_CLK3] = &msm8998_ln_bb_clk3,
+       [RPM_SMD_LN_BB_CLK3_A] = &msm8998_ln_bb_clk3_a,
        [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
        [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
        [RPM_SMD_LN_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
        [RPM_SMD_LN_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
        [RPM_SMD_LN_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
        [RPM_SMD_LN_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
-       [RPM_SMD_LN_BB_CLK3_PIN] = &sdm660_ln_bb_clk3_pin,
-       [RPM_SMD_LN_BB_CLK3_A_PIN] = &sdm660_ln_bb_clk3_pin_a,
+       [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin,
+       [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin,
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
@@ -1002,8 +1025,8 @@ static struct clk_smd_rpm *sm6125_clks[] = {
        [RPM_SMD_LN_BB_CLK1_A] = &msm8916_bb_clk1_a,
        [RPM_SMD_LN_BB_CLK2] = &msm8916_bb_clk2,
        [RPM_SMD_LN_BB_CLK2_A] = &msm8916_bb_clk2_a,
-       [RPM_SMD_LN_BB_CLK3] = &sdm660_ln_bb_clk3,
-       [RPM_SMD_LN_BB_CLK3_A] = &sdm660_ln_bb_clk3_a,
+       [RPM_SMD_LN_BB_CLK3] = &msm8998_ln_bb_clk3,
+       [RPM_SMD_LN_BB_CLK3_A] = &msm8998_ln_bb_clk3_a,
        [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
        [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
        [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
new file mode 100644 (file)
index 0000000..96b1493
--- /dev/null
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-qcm2290.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+       P_BI_TCXO,
+       P_DISP_CC_PLL0_OUT_MAIN,
+       P_DSI0_PHY_PLL_OUT_BYTECLK,
+       P_DSI0_PHY_PLL_OUT_DSICLK,
+       P_DSI1_PHY_PLL_OUT_DSICLK,
+       P_GPLL0_OUT_MAIN,
+       P_SLEEP_CLK,
+};
+
+static const struct pll_vco spark_vco[] = {
+       { 500000000, 1000000000, 2 },
+};
+
+/* 768MHz configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+       .l = 0x28,
+       .alpha = 0x0,
+       .alpha_en_mask = BIT(24),
+       .vco_val = 0x2 << 20,
+       .vco_mask = GENMASK(21, 20),
+       .main_output_mask = BIT(0),
+       .config_ctl_val = 0x4001055B,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+       .offset = 0x0,
+       .vco_table = spark_vco,
+       .num_vco = ARRAY_SIZE(spark_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_pll0",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_ops,
+               },
+       },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_byteclk" },
+       { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+       { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+       { P_BI_TCXO, 0 },
+       { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+       { .fw_name = "bi_tcxo_ao" },
+       { .fw_name = "gcc_disp_gpll0_div_clk_src" },
+       { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+       { P_BI_TCXO, 0 },
+       { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+       { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &disp_cc_pll0.clkr.hw },
+       { .fw_name = "gcc_disp_gpll0_clk_src" },
+       { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+       { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_dsiclk" },
+       { .fw_name = "dsi1_phy_pll_out_dsiclk" },
+       { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+       { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+       { .fw_name = "sleep_clk" },
+       { .fw_name = "core_bi_pll_test_se" },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+       .cmd_rcgr = 0x20a4,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_byte0_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+               /* For set_rate and set_parent to succeed, parent(s) must be enabled */
+               .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+       .reg = 0x20bc,
+       .shift = 0,
+       .width = 2,
+       .clkr.hw.init = &(struct clk_init_data) {
+               .name = "disp_cc_mdss_byte0_div_clk_src",
+               .parent_hws = (const struct clk_hw*[]){
+                       &disp_cc_mdss_byte0_clk_src.clkr.hw,
+               },
+               .num_parents = 1,
+               .ops = &clk_regmap_div_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+       F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+       .cmd_rcgr = 0x2154,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_2,
+       .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_ahb_clk_src",
+               .parent_data = disp_cc_parent_data_2,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+       .cmd_rcgr = 0x20c0,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_esc0_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+       F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+       F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+       F(384000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+       .cmd_rcgr = 0x2074,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_3,
+       .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_mdp_clk_src",
+               .parent_data = disp_cc_parent_data_3,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+       .cmd_rcgr = 0x205c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_4,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_pclk0_clk_src",
+               .parent_data = disp_cc_parent_data_4,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+               /* For set_rate and set_parent to succeed, parent(s) must be enabled */
+               .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+               .ops = &clk_pixel_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+       .cmd_rcgr = 0x208c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_vsync_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+       F(32764, P_SLEEP_CLK, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+       .cmd_rcgr = 0x6050,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_5,
+       .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_sleep_clk_src",
+               .parent_data = disp_cc_parent_data_5,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+       .halt_reg = 0x2044,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2044,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_ahb_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+       .halt_reg = 0x201c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x201c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_byte0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+       .halt_reg = 0x2020,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2020,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_intf_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+       .halt_reg = 0x2024,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_esc0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_esc0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+       .halt_reg = 0x2008,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+       .halt_reg = 0x2010,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x2010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_lut_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+       .halt_reg = 0x4004,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x4004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+       .halt_reg = 0x2004,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_pclk0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+       .halt_reg = 0x2018,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_vsync_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_vsync_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+       .halt_reg = 0x6068,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x6068,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_sleep_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_sleep_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc mdss_gdsc = {
+       .gdscr = 0x3000,
+       .pd = {
+               .name = "mdss_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = HW_CTRL,
+};
+
+static struct gdsc *disp_cc_qcm2290_gdscs[] = {
+       [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static struct clk_regmap *disp_cc_qcm2290_clocks[] = {
+       [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+       [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+       [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+       [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+       [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+       [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+       [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+       [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+       [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+};
+
+static const struct regmap_config disp_cc_qcm2290_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = 0x10000,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_qcm2290_desc = {
+       .config = &disp_cc_qcm2290_regmap_config,
+       .clks = disp_cc_qcm2290_clocks,
+       .num_clks = ARRAY_SIZE(disp_cc_qcm2290_clocks),
+       .gdscs = disp_cc_qcm2290_gdscs,
+       .num_gdscs = ARRAY_SIZE(disp_cc_qcm2290_gdscs),
+};
+
+static const struct of_device_id disp_cc_qcm2290_match_table[] = {
+       { .compatible = "qcom,qcm2290-dispcc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_qcm2290_match_table);
+
+static int disp_cc_qcm2290_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+       int ret;
+
+       regmap = qcom_cc_map(pdev, &disp_cc_qcm2290_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+       /* Keep DISP_CC_XO_CLK always-ON */
+       regmap_update_bits(regmap, 0x604c, BIT(0), BIT(0));
+
+       ret = qcom_cc_really_probe(pdev, &disp_cc_qcm2290_desc, regmap);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register DISP CC clocks\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static struct platform_driver disp_cc_qcm2290_driver = {
+       .probe = disp_cc_qcm2290_probe,
+       .driver = {
+               .name = "dispcc-qcm2290",
+               .of_match_table = disp_cc_qcm2290_match_table,
+       },
+};
+
+static int __init disp_cc_qcm2290_init(void)
+{
+       return platform_driver_register(&disp_cc_qcm2290_driver);
+}
+subsys_initcall(disp_cc_qcm2290_init);
+
+static void __exit disp_cc_qcm2290_exit(void)
+{
+       platform_driver_unregister(&disp_cc_qcm2290_driver);
+}
+module_exit(disp_cc_qcm2290_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC qcm2290 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sm6125.c b/drivers/clk/qcom/dispcc-sm6125.c
new file mode 100644 (file)
index 0000000..b921456
--- /dev/null
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sm6125.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+       P_BI_TCXO,
+       P_DISP_CC_PLL0_OUT_MAIN,
+       P_DP_PHY_PLL_LINK_CLK,
+       P_DP_PHY_PLL_VCO_DIV_CLK,
+       P_DSI0_PHY_PLL_OUT_BYTECLK,
+       P_DSI0_PHY_PLL_OUT_DSICLK,
+       P_DSI1_PHY_PLL_OUT_DSICLK,
+       P_GPLL0_OUT_MAIN,
+};
+
+static struct pll_vco disp_cc_pll_vco[] = {
+       { 500000000, 1000000000, 2 },
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+       .offset = 0x0,
+       .vco_table = disp_cc_pll_vco,
+       .num_vco = ARRAY_SIZE(disp_cc_pll_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+       .flags = SUPPORTS_DYNAMIC_UPDATE,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_pll0",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_ops,
+               },
+       },
+};
+
+/* 768MHz configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+       .l = 0x28,
+       .vco_val = 0x2 << 20,
+       .vco_mask = 0x3 << 20,
+       .main_output_mask = BIT(0),
+       .config_ctl_val = 0x4001055b,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+       { P_BI_TCXO, 0 },
+       { P_DP_PHY_PLL_LINK_CLK, 1 },
+       { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dp_phy_pll_link_clk" },
+       { .fw_name = "dp_phy_pll_vco_div_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_byteclk" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+       { P_BI_TCXO, 0 },
+       { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+       { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &disp_cc_pll0.clkr.hw },
+       { .fw_name = "gcc_disp_gpll0_div_clk_src" },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+       { P_BI_TCXO, 0 },
+       { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "gcc_disp_gpll0_div_clk_src" },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+       { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_dsiclk" },
+       { .fw_name = "dsi1_phy_pll_out_dsiclk" },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+       F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+       .cmd_rcgr = 0x2154,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_4,
+       .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_ahb_clk_src",
+               .parent_data = disp_cc_parent_data_4,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+       .cmd_rcgr = 0x20bc,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_2,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_byte0_clk_src",
+               .parent_data = disp_cc_parent_data_2,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux1_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+       .cmd_rcgr = 0x213c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_aux_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto_clk_src[] = {
+       F( 180000, P_DP_PHY_PLL_LINK_CLK,   1.5,   0,   0),
+       F( 360000, P_DP_PHY_PLL_LINK_CLK,   1.5,   0,   0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+       .cmd_rcgr = 0x210c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_crypto_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_crypto_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+               .flags = CLK_GET_RATE_NOCACHE,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
+       F( 162000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+       F( 270000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+       F( 540000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+       .cmd_rcgr = 0x20f0,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_link_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+       .cmd_rcgr = 0x2124,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_pixel_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_dp_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+       .cmd_rcgr = 0x20d8,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_2,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_esc0_clk_src",
+               .parent_data = disp_cc_parent_data_2,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+       F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+       F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+       F(384000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+       F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+       .cmd_rcgr = 0x2074,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_3,
+       .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_mdp_clk_src",
+               .parent_data = disp_cc_parent_data_3,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+       .cmd_rcgr = 0x205c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_5,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_pclk0_clk_src",
+               .parent_data = disp_cc_parent_data_5,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_pixel_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+       F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+       F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+       .cmd_rcgr = 0x208c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_3,
+       .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_rot_clk_src",
+               .parent_data = disp_cc_parent_data_3,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_shared_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+       .cmd_rcgr = 0x20a4,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_vsync_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+       .halt_reg = 0x2044,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2044,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_ahb_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+       .halt_reg = 0x2024,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_byte0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+       .halt_reg = 0x2028,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_intf_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_byte0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+       .halt_reg = 0x2040,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2040,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_aux_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+       .halt_reg = 0x2038,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2038,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_crypto_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+       .halt_reg = 0x2030,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2030,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+       .halt_reg = 0x2034,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2034,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_intf_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+       .halt_reg = 0x203c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x203c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_pixel_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+       .halt_reg = 0x202c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x202c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_esc0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_esc0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+       .halt_reg = 0x2008,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+       .halt_reg = 0x2018,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x2018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_lut_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+       .halt_reg = 0x4004,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x4004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+       .halt_reg = 0x2004,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_pclk0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+       .halt_reg = 0x2010,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_rot_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_rot_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+       .halt_reg = 0x2020,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2020,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_vsync_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_vsync_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_xo_clk = {
+       .halt_reg = 0x604c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x604c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_xo_clk",
+                       .flags = CLK_IS_CRITICAL,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc mdss_gdsc = {
+       .gdscr = 0x3000,
+       .pd = {
+               .name = "mdss_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = HW_CTRL,
+};
+
+static struct clk_regmap *disp_cc_sm6125_clocks[] = {
+       [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+       [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+       [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+       [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+       [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+       [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+       [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+       [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+       [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+       [DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
+};
+
+static struct gdsc *disp_cc_sm6125_gdscs[] = {
+       [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm6125_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = 0x10000,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sm6125_desc = {
+       .config = &disp_cc_sm6125_regmap_config,
+       .clks = disp_cc_sm6125_clocks,
+       .num_clks = ARRAY_SIZE(disp_cc_sm6125_clocks),
+       .gdscs = disp_cc_sm6125_gdscs,
+       .num_gdscs = ARRAY_SIZE(disp_cc_sm6125_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm6125_match_table[] = {
+       { .compatible = "qcom,dispcc-sm6125" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm6125_match_table);
+
+static int disp_cc_sm6125_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &disp_cc_sm6125_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+       return qcom_cc_really_probe(pdev, &disp_cc_sm6125_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sm6125_driver = {
+       .probe = disp_cc_sm6125_probe,
+       .driver = {
+               .name = "disp_cc-sm6125",
+               .of_match_table = disp_cc_sm6125_match_table,
+       },
+};
+
+static int __init disp_cc_sm6125_init(void)
+{
+       return platform_driver_register(&disp_cc_sm6125_driver);
+}
+subsys_initcall(disp_cc_sm6125_init);
+
+static void __exit disp_cc_sm6125_exit(void)
+{
+       platform_driver_unregister(&disp_cc_sm6125_driver);
+}
+module_exit(disp_cc_sm6125_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC SM6125 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
new file mode 100644 (file)
index 0000000..0c3c2e2
--- /dev/null
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sm6350.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+       P_BI_TCXO,
+       P_DISP_CC_PLL0_OUT_EVEN,
+       P_DISP_CC_PLL0_OUT_MAIN,
+       P_DP_PHY_PLL_LINK_CLK,
+       P_DP_PHY_PLL_VCO_DIV_CLK,
+       P_DSI0_PHY_PLL_OUT_BYTECLK,
+       P_DSI0_PHY_PLL_OUT_DSICLK,
+       P_GCC_DISP_GPLL0_CLK,
+};
+
+static struct pll_vco fabia_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config disp_cc_pll0_config = {
+       .l = 0x3a,
+       .alpha = 0x5555,
+       .config_ctl_val = 0x20485699,
+       .config_ctl_hi_val = 0x00002067,
+       .test_ctl_val = 0x40000000,
+       .test_ctl_hi_val = 0x00000002,
+       .user_ctl_val = 0x00000000,
+       .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+       .offset = 0x0,
+       .vco_table = fabia_vco,
+       .num_vco = ARRAY_SIZE(fabia_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_pll0",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fabia_ops,
+               },
+       },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_DP_PHY_PLL_LINK_CLK, 1 },
+       { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dp_phy_pll_link_clk" },
+       { .fw_name = "dp_phy_pll_vco_div_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_byteclk" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+       { P_BI_TCXO, 0 },
+       { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+       { P_GCC_DISP_GPLL0_CLK, 4 },
+       { P_DISP_CC_PLL0_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &disp_cc_pll0.clkr.hw },
+       { .fw_name = "gcc_disp_gpll0_clk" },
+       { .hw = &disp_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+       { P_BI_TCXO, 0 },
+       { P_GCC_DISP_GPLL0_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "gcc_disp_gpll0_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+       { P_BI_TCXO, 0 },
+       { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+       { .fw_name = "bi_tcxo" },
+       { .fw_name = "dsi0_phy_pll_out_dsiclk" },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+       { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+       { .fw_name = "bi_tcxo" },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(37500000, P_GCC_DISP_GPLL0_CLK, 16, 0, 0),
+       F(75000000, P_GCC_DISP_GPLL0_CLK, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+       .cmd_rcgr = 0x115c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_4,
+       .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_ahb_clk_src",
+               .parent_data = disp_cc_parent_data_4,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+       .cmd_rcgr = 0x10c4,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_byte0_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_byte2_ops,
+       },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+       .reg = 0x10dc,
+       .shift = 0,
+       .width = 2,
+       .clkr.hw.init = &(struct clk_init_data) {
+               .name = "disp_cc_mdss_byte0_div_clk_src",
+               .parent_hws = (const struct clk_hw*[]){
+                       &disp_cc_mdss_byte0_clk_src.clkr.hw,
+               },
+               .num_parents = 1,
+               .flags = CLK_GET_RATE_NOCACHE,
+               .ops = &clk_regmap_div_ro_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+       .cmd_rcgr = 0x1144,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_aux_clk_src",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "bi_tcxo",
+               },
+               .num_parents = 1,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto_clk_src[] = {
+       F(108000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
+       F(180000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
+       F(360000, P_DP_PHY_PLL_LINK_CLK, 1.5, 0, 0),
+       F(540000, P_DP_PHY_PLL_LINK_CLK, 1.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+       .cmd_rcgr = 0x1114,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_crypto_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_crypto_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+               .flags = CLK_GET_RATE_NOCACHE,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
+       F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+       F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+       F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+       F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+       .cmd_rcgr = 0x10f8,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_link_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+       .cmd_rcgr = 0x112c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_0,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_dp_pixel_clk_src",
+               .parent_data = disp_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_dp_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+       .cmd_rcgr = 0x10e0,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_1,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_esc0_clk_src",
+               .parent_data = disp_cc_parent_data_1,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(200000000, P_GCC_DISP_GPLL0_CLK, 3, 0, 0),
+       F(300000000, P_GCC_DISP_GPLL0_CLK, 2, 0, 0),
+       F(373333333, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+       F(448000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+       F(560000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+       .cmd_rcgr = 0x107c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_3,
+       .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_mdp_clk_src",
+               .parent_data = disp_cc_parent_data_3,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+       .cmd_rcgr = 0x1064,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_5,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_pclk0_clk_src",
+               .parent_data = disp_cc_parent_data_5,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+               .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               .ops = &clk_pixel_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+       .cmd_rcgr = 0x1094,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_3,
+       .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_rot_clk_src",
+               .parent_data = disp_cc_parent_data_3,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+       .cmd_rcgr = 0x10ac,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = disp_cc_parent_map_6,
+       .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "disp_cc_mdss_vsync_clk_src",
+               .parent_data = disp_cc_parent_data_6,
+               .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+       .reg = 0x1110,
+       .shift = 0,
+       .width = 2,
+       .clkr.hw.init = &(struct clk_init_data) {
+               .name = "disp_cc_mdss_dp_link_div_clk_src",
+               .parent_hws = (const struct clk_hw*[]){
+                       &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+               },
+               .num_parents = 1,
+               .flags = CLK_GET_RATE_NOCACHE,
+               .ops = &clk_regmap_div_ro_ops,
+       },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+       .halt_reg = 0x104c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x104c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_ahb_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+       .halt_reg = 0x102c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x102c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_byte0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+       .halt_reg = 0x1030,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1030,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_byte0_intf_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+       .halt_reg = 0x1048,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_aux_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+       .halt_reg = 0x1040,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1040,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_crypto_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+       .halt_reg = 0x1038,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1038,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+       .halt_reg = 0x103c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x103c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_link_intf_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+       .halt_reg = 0x1044,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1044,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_dp_pixel_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+       .halt_reg = 0x1034,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1034,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_esc0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_esc0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+       .halt_reg = 0x1010,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1010,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+       .halt_reg = 0x1020,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1020,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_mdp_lut_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_mdp_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+       .halt_reg = 0x2004,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x2004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+       .halt_reg = 0x100c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x100c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_pclk0_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+       .halt_reg = 0x1018,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_rot_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_rot_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+       .halt_reg = 0x200c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x200c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_rscc_ahb_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_ahb_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+       .halt_reg = 0x2008,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x2008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_rscc_vsync_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_vsync_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+       .halt_reg = 0x1028,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_mdss_vsync_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &disp_cc_mdss_vsync_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+       .halt_reg = 0x5004,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x5004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_sleep_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch disp_cc_xo_clk = {
+       .halt_reg = 0x5008,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x5008,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "disp_cc_xo_clk",
+                       .flags = CLK_IS_CRITICAL,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc mdss_gdsc = {
+       .gdscr = 0x1004,
+       .pd = {
+               .name = "mdss_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm6350_clocks[] = {
+       [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+       [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+       [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+       [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+       [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+       [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+       [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
+               &disp_cc_mdss_dp_link_div_clk_src.clkr,
+       [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+       [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+       [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+       [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+       [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+       [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+       [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+       [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+       [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+       [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+       [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+       [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+       [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+       [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+       [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+       [DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
+};
+
+static struct gdsc *disp_cc_sm6350_gdscs[] = {
+       [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm6350_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = 0x10000,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sm6350_desc = {
+       .config = &disp_cc_sm6350_regmap_config,
+       .clks = disp_cc_sm6350_clocks,
+       .num_clks = ARRAY_SIZE(disp_cc_sm6350_clocks),
+       .gdscs = disp_cc_sm6350_gdscs,
+       .num_gdscs = ARRAY_SIZE(disp_cc_sm6350_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm6350_match_table[] = {
+       { .compatible = "qcom,sm6350-dispcc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm6350_match_table);
+
+static int disp_cc_sm6350_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &disp_cc_sm6350_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+       return qcom_cc_really_probe(pdev, &disp_cc_sm6350_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sm6350_driver = {
+       .probe = disp_cc_sm6350_probe,
+       .driver = {
+               .name = "disp_cc-sm6350",
+               .of_match_table = disp_cc_sm6350_match_table,
+       },
+};
+
+static int __init disp_cc_sm6350_init(void)
+{
+       return platform_driver_register(&disp_cc_sm6350_driver);
+}
+subsys_initcall(disp_cc_sm6350_init);
+
+static void __exit disp_cc_sm6350_exit(void)
+{
+       platform_driver_unregister(&disp_cc_sm6350_driver);
+}
+module_exit(disp_cc_sm6350_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC SM6350 Driver");
+MODULE_LICENSE("GPL v2");
index d6b7adb..718de17 100644 (file)
 #include "clk-hfpll.h"
 #include "reset.h"
 
+static const struct clk_parent_data gcc_pxo[] = {
+       { .fw_name = "pxo", .name = "pxo" },
+};
+
 static struct clk_pll pll0 = {
        .l_reg = 0x30c4,
        .m_reg = 0x30c8,
@@ -35,7 +39,7 @@ static struct clk_pll pll0 = {
        .status_bit = 16,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pll0",
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
                .num_parents = 1,
                .ops = &clk_pll_ops,
        },
@@ -46,7 +50,9 @@ static struct clk_regmap pll0_vote = {
        .enable_mask = BIT(0),
        .hw.init = &(struct clk_init_data){
                .name = "pll0_vote",
-               .parent_names = (const char *[]){ "pll0" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &pll0.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_pll_vote_ops,
        },
@@ -62,7 +68,7 @@ static struct clk_pll pll3 = {
        .status_bit = 16,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pll3",
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
                .num_parents = 1,
                .ops = &clk_pll_ops,
        },
@@ -89,7 +95,7 @@ static struct clk_pll pll8 = {
        .status_bit = 16,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pll8",
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
                .num_parents = 1,
                .ops = &clk_pll_ops,
        },
@@ -100,7 +106,9 @@ static struct clk_regmap pll8_vote = {
        .enable_mask = BIT(8),
        .hw.init = &(struct clk_init_data){
                .name = "pll8_vote",
-               .parent_names = (const char *[]){ "pll8" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &pll8.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_pll_vote_ops,
        },
@@ -123,7 +131,7 @@ static struct hfpll_data hfpll0_data = {
 static struct clk_hfpll hfpll0 = {
        .d = &hfpll0_data,
        .clkr.hw.init = &(struct clk_init_data){
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
                .num_parents = 1,
                .name = "hfpll0",
                .ops = &clk_ops_hfpll,
@@ -149,7 +157,7 @@ static struct hfpll_data hfpll1_data = {
 static struct clk_hfpll hfpll1 = {
        .d = &hfpll1_data,
        .clkr.hw.init = &(struct clk_init_data){
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
                .num_parents = 1,
                .name = "hfpll1",
                .ops = &clk_ops_hfpll,
@@ -175,7 +183,7 @@ static struct hfpll_data hfpll_l2_data = {
 static struct clk_hfpll hfpll_l2 = {
        .d = &hfpll_l2_data,
        .clkr.hw.init = &(struct clk_init_data){
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
                .num_parents = 1,
                .name = "hfpll_l2",
                .ops = &clk_ops_hfpll,
@@ -194,7 +202,7 @@ static struct clk_pll pll14 = {
        .status_bit = 16,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pll14",
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
                .num_parents = 1,
                .ops = &clk_pll_ops,
        },
@@ -205,7 +213,9 @@ static struct clk_regmap pll14_vote = {
        .enable_mask = BIT(14),
        .hw.init = &(struct clk_init_data){
                .name = "pll14_vote",
-               .parent_names = (const char *[]){ "pll14" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &pll14.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_pll_vote_ops,
        },
@@ -222,7 +232,9 @@ static struct clk_regmap pll14_vote = {
 
 static struct pll_freq_tbl pll18_freq_tbl[] = {
        NSS_PLL_RATE(550000000, 44, 0, 1, 0x01495625),
+       NSS_PLL_RATE(600000000, 48, 0, 1, 0x01495625),
        NSS_PLL_RATE(733000000, 58, 16, 25, 0x014b5625),
+       NSS_PLL_RATE(800000000, 64, 0, 1, 0x01495625),
 };
 
 static struct clk_pll pll18 = {
@@ -238,7 +250,25 @@ static struct clk_pll pll18 = {
        .freq_tbl = pll18_freq_tbl,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pll18",
-               .parent_names = (const char *[]){ "pxo" },
+               .parent_data = gcc_pxo,
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_pll pll11 = {
+       .l_reg = 0x3184,
+       .m_reg = 0x3188,
+       .n_reg = 0x318c,
+       .config_reg = 0x3194,
+       .mode_reg = 0x3180,
+       .status_reg = 0x3198,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll11",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "pxo",
+               },
                .num_parents = 1,
                .ops = &clk_pll_ops,
        },
@@ -252,6 +282,7 @@ enum {
        P_CXO,
        P_PLL14,
        P_PLL18,
+       P_PLL11,
 };
 
 static const struct parent_map gcc_pxo_pll8_map[] = {
@@ -259,9 +290,9 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
        { P_PLL8, 3 }
 };
 
-static const char * const gcc_pxo_pll8[] = {
-       "pxo",
-       "pll8_vote",
+static const struct clk_parent_data gcc_pxo_pll8[] = {
+       { .fw_name = "pxo", .name = "pxo" },
+       { .hw = &pll8_vote.hw },
 };
 
 static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
@@ -270,10 +301,10 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
        { P_CXO, 5 }
 };
 
-static const char * const gcc_pxo_pll8_cxo[] = {
-       "pxo",
-       "pll8_vote",
-       "cxo",
+static const struct clk_parent_data gcc_pxo_pll8_cxo[] = {
+       { .fw_name = "pxo", .name = "pxo" },
+       { .hw = &pll8_vote.hw },
+       { .fw_name = "cxo", .name = "cxo" },
 };
 
 static const struct parent_map gcc_pxo_pll3_map[] = {
@@ -286,21 +317,21 @@ static const struct parent_map gcc_pxo_pll3_sata_map[] = {
        { P_PLL3, 6 }
 };
 
-static const char * const gcc_pxo_pll3[] = {
-       "pxo",
-       "pll3",
+static const struct clk_parent_data gcc_pxo_pll3[] = {
+       { .fw_name = "pxo", .name = "pxo" },
+       { .hw = &pll3.clkr.hw },
 };
 
-static const struct parent_map gcc_pxo_pll8_pll0[] = {
+static const struct parent_map gcc_pxo_pll8_pll0_map[] = {
        { P_PXO, 0 },
        { P_PLL8, 3 },
        { P_PLL0, 2 }
 };
 
-static const char * const gcc_pxo_pll8_pll0_map[] = {
-       "pxo",
-       "pll8_vote",
-       "pll0_vote",
+static const struct clk_parent_data gcc_pxo_pll8_pll0[] = {
+       { .fw_name = "pxo", .name = "pxo" },
+       { .hw = &pll8_vote.hw },
+       { .hw = &pll0_vote.hw },
 };
 
 static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = {
@@ -311,12 +342,50 @@ static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = {
        { P_PLL18, 1 }
 };
 
-static const char * const gcc_pxo_pll8_pll14_pll18_pll0[] = {
-       "pxo",
-       "pll8_vote",
-       "pll0_vote",
-       "pll14",
-       "pll18",
+static const struct clk_parent_data gcc_pxo_pll8_pll14_pll18_pll0[] = {
+       { .fw_name = "pxo", .name = "pxo" },
+       { .hw = &pll8_vote.hw },
+       { .hw = &pll0_vote.hw },
+       { .hw = &pll14.clkr.hw },
+       { .hw = &pll18.clkr.hw },
+};
+
+static const struct parent_map gcc_pxo_pll8_pll0_pll14_pll18_pll11_map[] = {
+       { P_PXO, 0 },
+       { P_PLL8, 4 },
+       { P_PLL0, 2 },
+       { P_PLL14, 5 },
+       { P_PLL18, 1 },
+       { P_PLL11, 3 },
+};
+
+static const struct clk_parent_data gcc_pxo_pll8_pll0_pll14_pll18_pll11[] = {
+       { .fw_name = "pxo" },
+       { .hw = &pll8_vote.hw },
+       { .hw = &pll0_vote.hw },
+       { .hw = &pll14.clkr.hw },
+       { .hw = &pll18.clkr.hw },
+       { .hw = &pll11.clkr.hw },
+
+};
+
+static const struct parent_map gcc_pxo_pll3_pll0_pll14_pll18_pll11_map[] = {
+       { P_PXO, 0 },
+       { P_PLL3, 6 },
+       { P_PLL0, 2 },
+       { P_PLL14, 5 },
+       { P_PLL18, 1 },
+       { P_PLL11, 3 },
+};
+
+static const struct clk_parent_data gcc_pxo_pll3_pll0_pll14_pll18_pll11[] = {
+       { .fw_name = "pxo" },
+       { .hw = &pll3.clkr.hw },
+       { .hw = &pll0_vote.hw },
+       { .hw = &pll14.clkr.hw },
+       { .hw = &pll18.clkr.hw },
+       { .hw = &pll11.clkr.hw },
+
 };
 
 static struct freq_tbl clk_tbl_gsbi_uart[] = {
@@ -362,8 +431,8 @@ static struct clk_rcg gsbi1_uart_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi1_uart_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -378,8 +447,8 @@ static struct clk_branch gsbi1_uart_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi1_uart_clk",
-                       .parent_names = (const char *[]){
-                               "gsbi1_uart_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi1_uart_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -413,8 +482,8 @@ static struct clk_rcg gsbi2_uart_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi2_uart_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -429,8 +498,8 @@ static struct clk_branch gsbi2_uart_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi2_uart_clk",
-                       .parent_names = (const char *[]){
-                               "gsbi2_uart_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi2_uart_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -464,8 +533,8 @@ static struct clk_rcg gsbi4_uart_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi4_uart_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -480,8 +549,8 @@ static struct clk_branch gsbi4_uart_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi4_uart_clk",
-                       .parent_names = (const char *[]){
-                               "gsbi4_uart_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi4_uart_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -515,8 +584,8 @@ static struct clk_rcg gsbi5_uart_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi5_uart_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -531,8 +600,8 @@ static struct clk_branch gsbi5_uart_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi5_uart_clk",
-                       .parent_names = (const char *[]){
-                               "gsbi5_uart_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi5_uart_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -566,8 +635,8 @@ static struct clk_rcg gsbi6_uart_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi6_uart_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -582,8 +651,8 @@ static struct clk_branch gsbi6_uart_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi6_uart_clk",
-                       .parent_names = (const char *[]){
-                               "gsbi6_uart_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi6_uart_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -617,8 +686,8 @@ static struct clk_rcg gsbi7_uart_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi7_uart_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -633,8 +702,8 @@ static struct clk_branch gsbi7_uart_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi7_uart_clk",
-                       .parent_names = (const char *[]){
-                               "gsbi7_uart_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi7_uart_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -681,8 +750,8 @@ static struct clk_rcg gsbi1_qup_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi1_qup_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -697,7 +766,9 @@ static struct clk_branch gsbi1_qup_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi1_qup_clk",
-                       .parent_names = (const char *[]){ "gsbi1_qup_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi1_qup_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -730,8 +801,8 @@ static struct clk_rcg gsbi2_qup_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi2_qup_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -746,7 +817,9 @@ static struct clk_branch gsbi2_qup_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi2_qup_clk",
-                       .parent_names = (const char *[]){ "gsbi2_qup_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi2_qup_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -779,10 +852,10 @@ static struct clk_rcg gsbi4_qup_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi4_qup_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
-                       .flags = CLK_SET_PARENT_GATE,
+                       .flags = CLK_SET_PARENT_GATE | CLK_IGNORE_UNUSED,
                },
        },
 };
@@ -795,10 +868,12 @@ static struct clk_branch gsbi4_qup_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi4_qup_clk",
-                       .parent_names = (const char *[]){ "gsbi4_qup_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi4_qup_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
-                       .flags = CLK_SET_RATE_PARENT,
+                       .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
                },
        },
 };
@@ -828,8 +903,8 @@ static struct clk_rcg gsbi5_qup_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi5_qup_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -844,7 +919,9 @@ static struct clk_branch gsbi5_qup_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi5_qup_clk",
-                       .parent_names = (const char *[]){ "gsbi5_qup_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi5_qup_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -877,10 +954,10 @@ static struct clk_rcg gsbi6_qup_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi6_qup_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
-                       .flags = CLK_SET_PARENT_GATE,
+                       .flags = CLK_SET_PARENT_GATE | CLK_IGNORE_UNUSED,
                },
        },
 };
@@ -893,7 +970,9 @@ static struct clk_branch gsbi6_qup_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi6_qup_clk",
-                       .parent_names = (const char *[]){ "gsbi6_qup_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi6_qup_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -926,8 +1005,8 @@ static struct clk_rcg gsbi7_qup_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi7_qup_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -942,10 +1021,12 @@ static struct clk_branch gsbi7_qup_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi7_qup_clk",
-                       .parent_names = (const char *[]){ "gsbi7_qup_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gsbi7_qup_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
-                       .flags = CLK_SET_RATE_PARENT,
+                       .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
                },
        },
 };
@@ -991,6 +1072,7 @@ static struct clk_branch gsbi4_h_clk = {
                .hw.init = &(struct clk_init_data){
                        .name = "gsbi4_h_clk",
                        .ops = &clk_branch_ops,
+                       .flags = CLK_IGNORE_UNUSED,
                },
        },
 };
@@ -1076,8 +1158,8 @@ static struct clk_rcg gp0_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gp0_src",
-                       .parent_names = gcc_pxo_pll8_cxo,
-                       .num_parents = 3,
+                       .parent_data = gcc_pxo_pll8_cxo,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_PARENT_GATE,
                },
@@ -1092,7 +1174,9 @@ static struct clk_branch gp0_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gp0_clk",
-                       .parent_names = (const char *[]){ "gp0_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gp0_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1125,8 +1209,8 @@ static struct clk_rcg gp1_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gp1_src",
-                       .parent_names = gcc_pxo_pll8_cxo,
-                       .num_parents = 3,
+                       .parent_data = gcc_pxo_pll8_cxo,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -1141,7 +1225,9 @@ static struct clk_branch gp1_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gp1_clk",
-                       .parent_names = (const char *[]){ "gp1_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gp1_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1174,8 +1260,8 @@ static struct clk_rcg gp2_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "gp2_src",
-                       .parent_names = gcc_pxo_pll8_cxo,
-                       .num_parents = 3,
+                       .parent_data = gcc_pxo_pll8_cxo,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -1190,7 +1276,9 @@ static struct clk_branch gp2_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "gp2_clk",
-                       .parent_names = (const char *[]){ "gp2_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gp2_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1228,8 +1316,8 @@ static struct clk_rcg prng_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "prng_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                },
        },
@@ -1244,7 +1332,9 @@ static struct clk_branch prng_clk = {
                .enable_mask = BIT(10),
                .hw.init = &(struct clk_init_data){
                        .name = "prng_clk",
-                       .parent_names = (const char *[]){ "prng_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &prng_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                },
@@ -1259,6 +1349,7 @@ static const struct freq_tbl clk_tbl_sdc[] = {
        {  20210000, P_PLL8,  1, 1,  19 },
        {  24000000, P_PLL8,  4, 1,   4 },
        {  48000000, P_PLL8,  4, 1,   2 },
+       {  51200000, P_PLL8,  1, 2,  15 },
        {  64000000, P_PLL8,  3, 1,   2 },
        {  96000000, P_PLL8,  4, 0,   0 },
        { 192000000, P_PLL8,  2, 0,   0 },
@@ -1290,9 +1381,9 @@ static struct clk_rcg sdc1_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "sdc1_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
-                       .ops = &clk_rcg_ops,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
+                       .ops = &clk_rcg_floor_ops,
                },
        }
 };
@@ -1305,7 +1396,9 @@ static struct clk_branch sdc1_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "sdc1_clk",
-                       .parent_names = (const char *[]){ "sdc1_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sdc1_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1338,8 +1431,8 @@ static struct clk_rcg sdc3_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "sdc3_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                },
        }
@@ -1353,7 +1446,9 @@ static struct clk_branch sdc3_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "sdc3_clk",
-                       .parent_names = (const char *[]){ "sdc3_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sdc3_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1421,8 +1516,8 @@ static struct clk_rcg tsif_ref_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "tsif_ref_src",
-                       .parent_names = gcc_pxo_pll8,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll8,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
                        .ops = &clk_rcg_ops,
                },
        }
@@ -1436,7 +1531,9 @@ static struct clk_branch tsif_ref_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "tsif_ref_clk",
-                       .parent_names = (const char *[]){ "tsif_ref_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &tsif_ref_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1583,8 +1680,8 @@ static struct clk_rcg pcie_ref_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "pcie_ref_src",
-                       .parent_names = gcc_pxo_pll3,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll3,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll3),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -1599,7 +1696,9 @@ static struct clk_branch pcie_ref_src_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "pcie_ref_src_clk",
-                       .parent_names = (const char *[]){ "pcie_ref_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pcie_ref_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1675,8 +1774,8 @@ static struct clk_rcg pcie1_ref_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "pcie1_ref_src",
-                       .parent_names = gcc_pxo_pll3,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll3,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll3),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -1691,7 +1790,9 @@ static struct clk_branch pcie1_ref_src_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "pcie1_ref_src_clk",
-                       .parent_names = (const char *[]){ "pcie1_ref_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pcie1_ref_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1767,8 +1868,8 @@ static struct clk_rcg pcie2_ref_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "pcie2_ref_src",
-                       .parent_names = gcc_pxo_pll3,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll3,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll3),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -1783,7 +1884,9 @@ static struct clk_branch pcie2_ref_src_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "pcie2_ref_src_clk",
-                       .parent_names = (const char *[]){ "pcie2_ref_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pcie2_ref_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1864,8 +1967,8 @@ static struct clk_rcg sata_ref_src = {
                .enable_mask = BIT(7),
                .hw.init = &(struct clk_init_data){
                        .name = "sata_ref_src",
-                       .parent_names = gcc_pxo_pll3,
-                       .num_parents = 2,
+                       .parent_data = gcc_pxo_pll3,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll3),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -1880,7 +1983,9 @@ static struct clk_branch sata_rxoob_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "sata_rxoob_clk",
-                       .parent_names = (const char *[]){ "sata_ref_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sata_ref_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1896,7 +2001,9 @@ static struct clk_branch sata_pmalive_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "sata_pmalive_clk",
-                       .parent_names = (const char *[]){ "sata_ref_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sata_ref_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -1912,7 +2019,7 @@ static struct clk_branch sata_phy_ref_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "sata_phy_ref_clk",
-                       .parent_names = (const char *[]){ "pxo" },
+                       .parent_data = gcc_pxo,
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                },
@@ -1993,7 +2100,7 @@ static struct clk_rcg usb30_master_clk_src = {
        },
        .s = {
                .src_sel_shift = 0,
-               .parent_map = gcc_pxo_pll8_pll0,
+               .parent_map = gcc_pxo_pll8_pll0_map,
        },
        .freq_tbl = clk_tbl_usb30_master,
        .clkr = {
@@ -2001,8 +2108,8 @@ static struct clk_rcg usb30_master_clk_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "usb30_master_ref_src",
-                       .parent_names = gcc_pxo_pll8_pll0_map,
-                       .num_parents = 3,
+                       .parent_data = gcc_pxo_pll8_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll0),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -2017,7 +2124,9 @@ static struct clk_branch usb30_0_branch_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "usb30_0_branch_clk",
-                       .parent_names = (const char *[]){ "usb30_master_ref_src", },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_master_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -2033,7 +2142,9 @@ static struct clk_branch usb30_1_branch_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "usb30_1_branch_clk",
-                       .parent_names = (const char *[]){ "usb30_master_ref_src", },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_master_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -2063,7 +2174,7 @@ static struct clk_rcg usb30_utmi_clk = {
        },
        .s = {
                .src_sel_shift = 0,
-               .parent_map = gcc_pxo_pll8_pll0,
+               .parent_map = gcc_pxo_pll8_pll0_map,
        },
        .freq_tbl = clk_tbl_usb30_utmi,
        .clkr = {
@@ -2071,8 +2182,8 @@ static struct clk_rcg usb30_utmi_clk = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "usb30_utmi_clk",
-                       .parent_names = gcc_pxo_pll8_pll0_map,
-                       .num_parents = 3,
+                       .parent_data = gcc_pxo_pll8_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll0),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -2087,7 +2198,9 @@ static struct clk_branch usb30_0_utmi_clk_ctl = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "usb30_0_utmi_clk_ctl",
-                       .parent_names = (const char *[]){ "usb30_utmi_clk", },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_utmi_clk.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -2103,7 +2216,9 @@ static struct clk_branch usb30_1_utmi_clk_ctl = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "usb30_1_utmi_clk_ctl",
-                       .parent_names = (const char *[]){ "usb30_utmi_clk", },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_utmi_clk.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -2133,7 +2248,7 @@ static struct clk_rcg usb_hs1_xcvr_clk_src = {
        },
        .s = {
                .src_sel_shift = 0,
-               .parent_map = gcc_pxo_pll8_pll0,
+               .parent_map = gcc_pxo_pll8_pll0_map,
        },
        .freq_tbl = clk_tbl_usb,
        .clkr = {
@@ -2141,8 +2256,8 @@ static struct clk_rcg usb_hs1_xcvr_clk_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "usb_hs1_xcvr_src",
-                       .parent_names = gcc_pxo_pll8_pll0_map,
-                       .num_parents = 3,
+                       .parent_data = gcc_pxo_pll8_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll0),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -2157,7 +2272,9 @@ static struct clk_branch usb_hs1_xcvr_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "usb_hs1_xcvr_clk",
-                       .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb_hs1_xcvr_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -2197,7 +2314,7 @@ static struct clk_rcg usb_fs1_xcvr_clk_src = {
        },
        .s = {
                .src_sel_shift = 0,
-               .parent_map = gcc_pxo_pll8_pll0,
+               .parent_map = gcc_pxo_pll8_pll0_map,
        },
        .freq_tbl = clk_tbl_usb,
        .clkr = {
@@ -2205,8 +2322,8 @@ static struct clk_rcg usb_fs1_xcvr_clk_src = {
                .enable_mask = BIT(11),
                .hw.init = &(struct clk_init_data){
                        .name = "usb_fs1_xcvr_src",
-                       .parent_names = gcc_pxo_pll8_pll0_map,
-                       .num_parents = 3,
+                       .parent_data = gcc_pxo_pll8_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll0),
                        .ops = &clk_rcg_ops,
                        .flags = CLK_SET_RATE_GATE,
                },
@@ -2221,7 +2338,9 @@ static struct clk_branch usb_fs1_xcvr_clk = {
                .enable_mask = BIT(9),
                .hw.init = &(struct clk_init_data){
                        .name = "usb_fs1_xcvr_clk",
-                       .parent_names = (const char *[]){ "usb_fs1_xcvr_src", },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb_fs1_xcvr_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -2237,7 +2356,9 @@ static struct clk_branch usb_fs1_sys_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "usb_fs1_sys_clk",
-                       .parent_names = (const char *[]){ "usb_fs1_xcvr_src", },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb_fs1_xcvr_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
                        .flags = CLK_SET_RATE_PARENT,
@@ -2337,8 +2458,8 @@ static struct clk_dyn_rcg gmac_core1_src = {
                .enable_mask = BIT(1),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core1_src",
-                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
-                       .num_parents = 5,
+                       .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll14_pll18_pll0),
                        .ops = &clk_dyn_rcg_ops,
                },
        },
@@ -2354,8 +2475,8 @@ static struct clk_branch gmac_core1_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core1_clk",
-                       .parent_names = (const char *[]){
-                               "gmac_core1_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gmac_core1_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -2409,8 +2530,8 @@ static struct clk_dyn_rcg gmac_core2_src = {
                .enable_mask = BIT(1),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core2_src",
-                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
-                       .num_parents = 5,
+                       .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll14_pll18_pll0),
                        .ops = &clk_dyn_rcg_ops,
                },
        },
@@ -2426,8 +2547,8 @@ static struct clk_branch gmac_core2_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core2_clk",
-                       .parent_names = (const char *[]){
-                               "gmac_core2_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gmac_core2_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -2481,8 +2602,8 @@ static struct clk_dyn_rcg gmac_core3_src = {
                .enable_mask = BIT(1),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core3_src",
-                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
-                       .num_parents = 5,
+                       .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll14_pll18_pll0),
                        .ops = &clk_dyn_rcg_ops,
                },
        },
@@ -2498,8 +2619,8 @@ static struct clk_branch gmac_core3_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core3_clk",
-                       .parent_names = (const char *[]){
-                               "gmac_core3_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gmac_core3_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -2553,8 +2674,8 @@ static struct clk_dyn_rcg gmac_core4_src = {
                .enable_mask = BIT(1),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core4_src",
-                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
-                       .num_parents = 5,
+                       .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll14_pll18_pll0),
                        .ops = &clk_dyn_rcg_ops,
                },
        },
@@ -2570,8 +2691,8 @@ static struct clk_branch gmac_core4_clk = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "gmac_core4_clk",
-                       .parent_names = (const char *[]){
-                               "gmac_core4_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gmac_core4_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -2613,8 +2734,8 @@ static struct clk_dyn_rcg nss_tcm_src = {
                .enable_mask = BIT(1),
                .hw.init = &(struct clk_init_data){
                        .name = "nss_tcm_src",
-                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
-                       .num_parents = 5,
+                       .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll14_pll18_pll0),
                        .ops = &clk_dyn_rcg_ops,
                },
        },
@@ -2628,8 +2749,8 @@ static struct clk_branch nss_tcm_clk = {
                .enable_mask = BIT(6) | BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "nss_tcm_clk",
-                       .parent_names = (const char *[]){
-                               "nss_tcm_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &nss_tcm_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch_ops,
@@ -2638,7 +2759,7 @@ static struct clk_branch nss_tcm_clk = {
        },
 };
 
-static const struct freq_tbl clk_tbl_nss[] = {
+static const struct freq_tbl clk_tbl_nss_ipq8064[] = {
        { 110000000, P_PLL18, 1, 1, 5 },
        { 275000000, P_PLL18, 2, 0, 0 },
        { 550000000, P_PLL18, 1, 0, 0 },
@@ -2646,6 +2767,14 @@ static const struct freq_tbl clk_tbl_nss[] = {
        { }
 };
 
+static const struct freq_tbl clk_tbl_nss_ipq8065[] = {
+       { 110000000, P_PLL18, 1, 1, 5 },
+       { 275000000, P_PLL18, 2, 0, 0 },
+       { 600000000, P_PLL18, 1, 0, 0 },
+       { 800000000, P_PLL18, 1, 0, 0 },
+       { }
+};
+
 static struct clk_dyn_rcg ubi32_core1_src_clk = {
        .ns_reg[0] = 0x3d2c,
        .ns_reg[1] = 0x3d30,
@@ -2685,14 +2814,14 @@ static struct clk_dyn_rcg ubi32_core1_src_clk = {
                .pre_div_width = 2,
        },
        .mux_sel_bit = 0,
-       .freq_tbl = clk_tbl_nss,
+       /* nss freq table is selected based on the SoC compatible */
        .clkr = {
                .enable_reg = 0x3d20,
                .enable_mask = BIT(1),
                .hw.init = &(struct clk_init_data){
                        .name = "ubi32_core1_src_clk",
-                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
-                       .num_parents = 5,
+                       .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll14_pll18_pll0),
                        .ops = &clk_dyn_rcg_ops,
                        .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
                },
@@ -2738,20 +2867,200 @@ static struct clk_dyn_rcg ubi32_core2_src_clk = {
                .pre_div_width = 2,
        },
        .mux_sel_bit = 0,
-       .freq_tbl = clk_tbl_nss,
+       /* nss freq table is selected based on the SoC compatible */
        .clkr = {
                .enable_reg = 0x3d40,
                .enable_mask = BIT(1),
                .hw.init = &(struct clk_init_data){
                        .name = "ubi32_core2_src_clk",
-                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
-                       .num_parents = 5,
+                       .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll14_pll18_pll0),
                        .ops = &clk_dyn_rcg_ops,
                        .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
                },
        },
 };
 
+static const struct freq_tbl clk_tbl_ce5_core[] = {
+       { 150000000, P_PLL3, 8, 1, 1 },
+       { 213200000, P_PLL11, 5, 1, 1 },
+       { }
+};
+
+static struct clk_dyn_rcg ce5_core_src = {
+       .ns_reg[0] = 0x36C4,
+       .ns_reg[1] = 0x36C8,
+       .bank_reg = 0x36C0,
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll3_pll0_pll14_pll18_pll11_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll3_pll0_pll14_pll18_pll11_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_ce5_core,
+       .clkr = {
+               .enable_reg = 0x36C0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce5_core_src",
+                       .parent_data = gcc_pxo_pll3_pll0_pll14_pll18_pll11,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll3_pll0_pll14_pll18_pll11),
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch ce5_core_clk = {
+       .halt_reg = 0x2FDC,
+       .halt_bit = 5,
+       .hwcg_reg = 0x36CC,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x36CC,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce5_core_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ce5_core_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_ce5_a_clk[] = {
+       { 160000000, P_PLL0, 5, 1, 1 },
+       { 213200000, P_PLL11, 5, 1, 1 },
+       { }
+};
+
+static struct clk_dyn_rcg ce5_a_clk_src = {
+       .ns_reg[0] = 0x3d84,
+       .ns_reg[1] = 0x3d88,
+       .bank_reg = 0x3d80,
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0_pll14_pll18_pll11_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0_pll14_pll18_pll11_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_ce5_a_clk,
+       .clkr = {
+               .enable_reg = 0x3d80,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce5_a_clk_src",
+                       .parent_data = gcc_pxo_pll8_pll0_pll14_pll18_pll11,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll0_pll14_pll18_pll11),
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch ce5_a_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 12,
+       .hwcg_reg = 0x3d8c,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3d8c,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce5_a_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ce5_a_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_ce5_h_clk[] = {
+       { 160000000, P_PLL0, 5, 1, 1 },
+       { 213200000, P_PLL11, 5, 1, 1 },
+       { }
+};
+
+static struct clk_dyn_rcg ce5_h_clk_src = {
+       .ns_reg[0] = 0x3c64,
+       .ns_reg[1] = 0x3c68,
+       .bank_reg = 0x3c60,
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0_pll14_pll18_pll11_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0_pll14_pll18_pll11_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_ce5_h_clk,
+       .clkr = {
+               .enable_reg = 0x3c60,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce5_h_clk_src",
+                       .parent_data = gcc_pxo_pll8_pll0_pll14_pll18_pll11,
+                       .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll0_pll14_pll18_pll11),
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch ce5_h_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 11,
+       .hwcg_reg = 0x3c6c,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3c6c,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce5_h_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ce5_h_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
 static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL0] = &pll0.clkr,
        [PLL0_VOTE] = &pll0_vote,
@@ -2759,6 +3068,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL4_VOTE] = &pll4_vote,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
+       [PLL11] = &pll11.clkr,
        [PLL14] = &pll14.clkr,
        [PLL14_VOTE] = &pll14_vote,
        [PLL18] = &pll18.clkr,
@@ -2873,6 +3183,12 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL9] = &hfpll0.clkr,
        [PLL10] = &hfpll1.clkr,
        [PLL12] = &hfpll_l2.clkr,
+       [CE5_A_CLK_SRC] = &ce5_a_clk_src.clkr,
+       [CE5_A_CLK] = &ce5_a_clk.clkr,
+       [CE5_H_CLK_SRC] = &ce5_h_clk_src.clkr,
+       [CE5_H_CLK] = &ce5_h_clk.clkr,
+       [CE5_CORE_CLK_SRC] = &ce5_core_src.clkr,
+       [CE5_CORE_CLK] = &ce5_core_clk.clkr,
 };
 
 static const struct qcom_reset_map gcc_ipq806x_resets[] = {
@@ -3004,6 +3320,11 @@ static const struct qcom_reset_map gcc_ipq806x_resets[] = {
        [GMAC_CORE3_RESET] = { 0x3cfc, 0 },
        [GMAC_CORE4_RESET] = { 0x3d1c, 0 },
        [GMAC_AHB_RESET] = { 0x3e24, 0 },
+       [CRYPTO_ENG1_RESET] = { 0x3e00, 0},
+       [CRYPTO_ENG2_RESET] = { 0x3e04, 0},
+       [CRYPTO_ENG3_RESET] = { 0x3e08, 0},
+       [CRYPTO_ENG4_RESET] = { 0x3e0c, 0},
+       [CRYPTO_AHB_RESET] = { 0x3e10, 0},
        [NSS_CH0_RST_RX_CLK_N_RESET] = { 0x3b60, 0 },
        [NSS_CH0_RST_TX_CLK_N_RESET] = { 0x3b60, 1 },
        [NSS_CH0_RST_RX_125M_N_RESET] = { 0x3b60, 2 },
@@ -3071,6 +3392,14 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       if (of_machine_is_compatible("qcom,ipq8065")) {
+               ubi32_core1_src_clk.freq_tbl = clk_tbl_nss_ipq8065;
+               ubi32_core2_src_clk.freq_tbl = clk_tbl_nss_ipq8065;
+       } else {
+               ubi32_core1_src_clk.freq_tbl = clk_tbl_nss_ipq8064;
+               ubi32_core2_src_clk.freq_tbl = clk_tbl_nss_ipq8064;
+       }
+
        ret = qcom_cc_probe(pdev, &gcc_ipq806x_desc);
        if (ret)
                return ret;
index 108fe27..541016d 100644 (file)
@@ -60,11 +60,6 @@ static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
        { P_GPLL0_DIV2, 4 },
 };
 
-static const char * const gcc_xo_gpll0[] = {
-       "xo",
-       "gpll0",
-};
-
 static const struct parent_map gcc_xo_gpll0_map[] = {
        { P_XO, 0 },
        { P_GPLL0, 1 },
@@ -956,6 +951,11 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
        },
 };
 
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+       { .fw_name = "xo" },
+       { .hw = &gpll0.clkr.hw },
+};
+
 static const struct freq_tbl ftbl_pcie_axi_clk_src[] = {
        F(19200000, P_XO, 1, 0, 0),
        F(200000000, P_GPLL0, 4, 0, 0),
@@ -969,7 +969,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
        .parent_map = gcc_xo_gpll0_map,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pcie0_axi_clk_src",
-               .parent_names = gcc_xo_gpll0,
+               .parent_data = gcc_xo_gpll0,
                .num_parents = 2,
                .ops = &clk_rcg2_ops,
        },
@@ -1016,7 +1016,7 @@ static struct clk_rcg2 pcie1_axi_clk_src = {
        .parent_map = gcc_xo_gpll0_map,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pcie1_axi_clk_src",
-               .parent_names = gcc_xo_gpll0,
+               .parent_data = gcc_xo_gpll0,
                .num_parents = 2,
                .ops = &clk_rcg2_ops,
        },
@@ -1074,7 +1074,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
                .name = "sdcc1_apps_clk_src",
                .parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -1330,7 +1330,7 @@ static struct clk_rcg2 nss_ce_clk_src = {
        .parent_map = gcc_xo_gpll0_map,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "nss_ce_clk_src",
-               .parent_names = gcc_xo_gpll0,
+               .parent_data = gcc_xo_gpll0,
                .num_parents = 2,
                .ops = &clk_rcg2_ops,
        },
@@ -4329,8 +4329,7 @@ static struct clk_rcg2 pcie0_rchng_clk_src = {
        .parent_map = gcc_xo_gpll0_map,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pcie0_rchng_clk_src",
-               .parent_hws = (const struct clk_hw *[]) {
-                               &gpll0.clkr.hw },
+               .parent_data = gcc_xo_gpll0,
                .num_parents = 2,
                .ops = &clk_rcg2_ops,
        },
index f094999..6b702cd 100644 (file)
@@ -77,6 +77,7 @@ static struct clk_alpha_pll gpll4_early = {
 
 static struct clk_alpha_pll_postdiv gpll4 = {
        .offset = 0x1dc0,
+       .width = 4,
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gpll4",
index 9b1674b..e161637 100644 (file)
 enum {
        P_XO,
        P_GPLL0,
-       P_GPLL2,
-       P_GPLL3,
-       P_GPLL1,
-       P_GPLL2_EARLY,
        P_GPLL0_EARLY_DIV,
        P_SLEEP_CLK,
        P_GPLL4,
        P_AUD_REF_CLK,
-       P_GPLL1_EARLY_DIV
-};
-
-static const struct parent_map gcc_sleep_clk_map[] = {
-       { P_SLEEP_CLK, 5 }
-};
-
-static const char * const gcc_sleep_clk[] = {
-       "sleep_clk"
-};
-
-static const struct parent_map gcc_xo_gpll0_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 }
-};
-
-static const char * const gcc_xo_gpll0[] = {
-       "xo",
-       "gpll0"
-};
-
-static const struct parent_map gcc_xo_sleep_clk_map[] = {
-       { P_XO, 0 },
-       { P_SLEEP_CLK, 5 }
-};
-
-static const char * const gcc_xo_sleep_clk[] = {
-       "xo",
-       "sleep_clk"
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll0_early_div_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 },
-       { P_GPLL0_EARLY_DIV, 6 }
-};
-
-static const char * const gcc_xo_gpll0_gpll0_early_div[] = {
-       "xo",
-       "gpll0",
-       "gpll0_early_div"
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 },
-       { P_GPLL4, 5 }
-};
-
-static const char * const gcc_xo_gpll0_gpll4[] = {
-       "xo",
-       "gpll0",
-       "gpll4"
-};
-
-static const struct parent_map gcc_xo_gpll0_aud_ref_clk_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 },
-       { P_AUD_REF_CLK, 2 }
-};
-
-static const char * const gcc_xo_gpll0_aud_ref_clk[] = {
-       "xo",
-       "gpll0",
-       "aud_ref_clk"
-};
-
-static const struct parent_map gcc_xo_gpll0_sleep_clk_gpll0_early_div_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 },
-       { P_SLEEP_CLK, 5 },
-       { P_GPLL0_EARLY_DIV, 6 }
-};
-
-static const char * const gcc_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-       "xo",
-       "gpll0",
-       "sleep_clk",
-       "gpll0_early_div"
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll4_gpll0_early_div_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 },
-       { P_GPLL4, 5 },
-       { P_GPLL0_EARLY_DIV, 6 }
-};
-
-static const char * const gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
-       "xo",
-       "gpll0",
-       "gpll4",
-       "gpll0_early_div"
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 },
-       { P_GPLL1_EARLY_DIV, 3 },
-       { P_GPLL1, 4 },
-       { P_GPLL4, 5 },
-       { P_GPLL0_EARLY_DIV, 6 }
-};
-
-static const char * const gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div[] = {
-       "xo",
-       "gpll0",
-       "gpll1_early_div",
-       "gpll1",
-       "gpll4",
-       "gpll0_early_div"
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div_map[] = {
-       { P_XO, 0 },
-       { P_GPLL0, 1 },
-       { P_GPLL2, 2 },
-       { P_GPLL3, 3 },
-       { P_GPLL1, 4 },
-       { P_GPLL2_EARLY, 5 },
-       { P_GPLL0_EARLY_DIV, 6 }
-};
-
-static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div[] = {
-       "xo",
-       "gpll0",
-       "gpll2",
-       "gpll3",
-       "gpll1",
-       "gpll2_early",
-       "gpll0_early_div"
 };
 
 static struct clk_fixed_factor xo = {
@@ -173,7 +38,9 @@ static struct clk_fixed_factor xo = {
        .div = 1,
        .hw.init = &(struct clk_init_data){
                .name = "xo",
-               .parent_names = (const char *[]){ "xo_board" },
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "cxo", .name = "xo_board",
+               },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
        },
@@ -187,7 +54,9 @@ static struct clk_alpha_pll gpll0_early = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gpll0_early",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo", .name = "xo_board",
+                       },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_ops,
                },
@@ -199,7 +68,9 @@ static struct clk_fixed_factor gpll0_early_div = {
        .div = 2,
        .hw.init = &(struct clk_init_data){
                .name = "gpll0_early_div",
-               .parent_names = (const char *[]){ "gpll0_early" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &gpll0_early.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
        },
@@ -210,7 +81,9 @@ static struct clk_alpha_pll_postdiv gpll0 = {
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gpll0",
-               .parent_names = (const char *[]){ "gpll0_early" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &gpll0_early.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_alpha_pll_postdiv_ops,
        },
@@ -223,7 +96,9 @@ static struct clk_branch gcc_mmss_gpll0_div_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mmss_gpll0_div_clk",
-                       .parent_names = (const char *[]){ "gpll0" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gpll0.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -238,7 +113,9 @@ static struct clk_branch gcc_mss_gpll0_div_clk = {
                .enable_mask = BIT(2),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mss_gpll0_div_clk",
-                       .parent_names = (const char *[]){ "gpll0" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gpll0.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops
@@ -254,7 +131,9 @@ static struct clk_alpha_pll gpll4_early = {
                .enable_mask = BIT(4),
                .hw.init = &(struct clk_init_data){
                        .name = "gpll4_early",
-                       .parent_names = (const char *[]){ "xo" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "cxo", .name = "xo_board",
+                       },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_ops,
                },
@@ -266,12 +145,106 @@ static struct clk_alpha_pll_postdiv gpll4 = {
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gpll4",
-               .parent_names = (const char *[]){ "gpll4_early" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &gpll4_early.clkr.hw,
+               },
                .num_parents = 1,
                .ops = &clk_alpha_pll_postdiv_ops,
        },
 };
 
+static const struct parent_map gcc_sleep_clk_map[] = {
+       { P_SLEEP_CLK, 5 }
+};
+
+static const struct clk_parent_data gcc_sleep_clk[] = {
+       { .fw_name = "sleep_clk", .name = "sleep_clk" }
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+       { .fw_name = "cxo", .name = "xo_board" },
+       { .hw = &gpll0.clkr.hw }
+};
+
+static const struct parent_map gcc_xo_sleep_clk_map[] = {
+       { P_XO, 0 },
+       { P_SLEEP_CLK, 5 }
+};
+
+static const struct clk_parent_data gcc_xo_sleep_clk[] = {
+       { .fw_name = "cxo", .name = "xo_board" },
+       { .fw_name = "sleep_clk", .name = "sleep_clk" }
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_early_div_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_early_div[] = {
+       { .fw_name = "cxo", .name = "xo_board" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll0_early_div.hw }
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL4, 5 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+       { .fw_name = "cxo", .name = "xo_board" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll4.clkr.hw }
+};
+
+static const struct parent_map gcc_xo_gpll0_aud_ref_clk_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_AUD_REF_CLK, 2 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_aud_ref_clk[] = {
+       { .fw_name = "cxo", .name = "xo_board" },
+       { .hw = &gpll0.clkr.hw },
+       { .fw_name = "aud_ref_clk", .name = "aud_ref_clk" }
+};
+
+static const struct parent_map gcc_xo_gpll0_sleep_clk_gpll0_early_div_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_SLEEP_CLK, 5 },
+       { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_sleep_clk_gpll0_early_div[] = {
+       { .fw_name = "cxo", .name = "xo_board" },
+       { .hw = &gpll0.clkr.hw },
+       { .fw_name = "sleep_clk", .name = "sleep_clk" },
+       { .hw = &gpll0_early_div.hw }
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_gpll0_early_div_map[] = {
+       { P_XO, 0 },
+       { P_GPLL0, 1 },
+       { P_GPLL4, 5 },
+       { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+       { .fw_name = "cxo", .name = "xo_board" },
+       { .hw = &gpll0.clkr.hw },
+       { .hw = &gpll4.clkr.hw },
+       { .hw = &gpll0_early_div.hw }
+};
+
 static const struct freq_tbl ftbl_system_noc_clk_src[] = {
        F(19200000, P_XO, 1, 0, 0),
        F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
@@ -285,12 +258,12 @@ static const struct freq_tbl ftbl_system_noc_clk_src[] = {
 static struct clk_rcg2 system_noc_clk_src = {
        .cmd_rcgr = 0x0401c,
        .hid_width = 5,
-       .parent_map = gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div_map,
+       .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
        .freq_tbl = ftbl_system_noc_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "system_noc_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div,
-               .num_parents = 7,
+               .parent_data = gcc_xo_gpll0_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -309,8 +282,8 @@ static struct clk_rcg2 config_noc_clk_src = {
        .freq_tbl = ftbl_config_noc_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "config_noc_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -331,8 +304,8 @@ static struct clk_rcg2 periph_noc_clk_src = {
        .freq_tbl = ftbl_periph_noc_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "periph_noc_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -352,8 +325,8 @@ static struct clk_rcg2 usb30_master_clk_src = {
        .freq_tbl = ftbl_usb30_master_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "usb30_master_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll0_early_div,
-               .num_parents = 3,
+               .parent_data = gcc_xo_gpll0_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -370,8 +343,8 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
        .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "usb30_mock_utmi_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll0_early_div,
-               .num_parents = 3,
+               .parent_data = gcc_xo_gpll0_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -388,8 +361,8 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
        .freq_tbl = ftbl_usb3_phy_aux_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "usb3_phy_aux_clk_src",
-               .parent_names = gcc_xo_sleep_clk,
-               .num_parents = 2,
+               .parent_data = gcc_xo_sleep_clk,
+               .num_parents = ARRAY_SIZE(gcc_xo_sleep_clk),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -407,8 +380,8 @@ static struct clk_rcg2 usb20_master_clk_src = {
        .freq_tbl = ftbl_usb20_master_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "usb20_master_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll0_early_div,
-               .num_parents = 3,
+               .parent_data = gcc_xo_gpll0_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -420,8 +393,8 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
        .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "usb20_mock_utmi_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll0_early_div,
-               .num_parents = 3,
+               .parent_data = gcc_xo_gpll0_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -446,8 +419,8 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
        .freq_tbl = ftbl_sdcc1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "sdcc1_apps_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
-               .num_parents = 4,
+               .parent_data = gcc_xo_gpll0_gpll4_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_gpll0_early_div),
                .ops = &clk_rcg2_floor_ops,
        },
 };
@@ -466,8 +439,8 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
        .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "sdcc1_ice_core_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
-               .num_parents = 4,
+               .parent_data = gcc_xo_gpll0_gpll4_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -491,8 +464,8 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
        .freq_tbl = ftbl_sdcc2_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "sdcc2_apps_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll4,
-               .num_parents = 3,
+               .parent_data = gcc_xo_gpll0_gpll4,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
                .ops = &clk_rcg2_floor_ops,
        },
 };
@@ -505,8 +478,8 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
        .freq_tbl = ftbl_sdcc2_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "sdcc3_apps_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll4,
-               .num_parents = 3,
+               .parent_data = gcc_xo_gpll0_gpll4,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
                .ops = &clk_rcg2_floor_ops,
        },
 };
@@ -529,8 +502,8 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
        .freq_tbl = ftbl_sdcc4_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "sdcc4_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_floor_ops,
        },
 };
@@ -554,8 +527,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup1_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -573,8 +546,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup1_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -606,8 +579,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_uart1_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -620,8 +593,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup2_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -633,8 +606,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup2_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -647,8 +620,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_uart2_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -661,8 +634,8 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup3_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -674,8 +647,8 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup3_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -688,8 +661,8 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_uart3_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -702,8 +675,8 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup4_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -715,8 +688,8 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup4_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -729,8 +702,8 @@ static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_uart4_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -743,8 +716,8 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup5_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -756,8 +729,8 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup5_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -770,8 +743,8 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_uart5_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -784,8 +757,8 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup6_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -797,8 +770,8 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_qup6_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -811,8 +784,8 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp1_uart6_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -825,8 +798,8 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup1_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -838,8 +811,8 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup1_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -852,8 +825,8 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_uart1_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -866,8 +839,8 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup2_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -879,8 +852,8 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup2_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -893,8 +866,8 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_uart2_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -907,8 +880,8 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup3_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -920,8 +893,8 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup3_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -934,8 +907,8 @@ static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_uart3_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -948,8 +921,8 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup4_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -961,8 +934,8 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup4_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -975,8 +948,8 @@ static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_uart4_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -989,8 +962,8 @@ static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup5_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1002,8 +975,8 @@ static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup5_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1016,8 +989,8 @@ static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_uart5_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1030,8 +1003,8 @@ static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup6_spi_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1043,8 +1016,8 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_qup6_i2c_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1057,8 +1030,8 @@ static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
        .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "blsp2_uart6_apps_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1075,8 +1048,8 @@ static struct clk_rcg2 pdm2_clk_src = {
        .freq_tbl = ftbl_pdm2_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pdm2_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1094,8 +1067,8 @@ static struct clk_rcg2 tsif_ref_clk_src = {
        .freq_tbl = ftbl_tsif_ref_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "tsif_ref_clk_src",
-               .parent_names = gcc_xo_gpll0_aud_ref_clk,
-               .num_parents = 3,
+               .parent_data = gcc_xo_gpll0_aud_ref_clk,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_aud_ref_clk),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1106,8 +1079,8 @@ static struct clk_rcg2 gcc_sleep_clk_src = {
        .parent_map = gcc_sleep_clk_map,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gcc_sleep_clk_src",
-               .parent_names = gcc_sleep_clk,
-               .num_parents = 1,
+               .parent_data = gcc_sleep_clk,
+               .num_parents = ARRAY_SIZE(gcc_sleep_clk),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1119,8 +1092,8 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
        .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "hmss_rbcpr_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1131,8 +1104,8 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
        .parent_map = gcc_xo_gpll0_map,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "hmss_gpll0_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1152,8 +1125,8 @@ static struct clk_rcg2 gp1_clk_src = {
        .freq_tbl = ftbl_gp1_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gp1_clk_src",
-               .parent_names = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
-               .num_parents = 4,
+               .parent_data = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_sleep_clk_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1166,8 +1139,8 @@ static struct clk_rcg2 gp2_clk_src = {
        .freq_tbl = ftbl_gp1_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gp2_clk_src",
-               .parent_names = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
-               .num_parents = 4,
+               .parent_data = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_sleep_clk_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1180,8 +1153,8 @@ static struct clk_rcg2 gp3_clk_src = {
        .freq_tbl = ftbl_gp1_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gp3_clk_src",
-               .parent_names = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
-               .num_parents = 4,
+               .parent_data = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_sleep_clk_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1199,8 +1172,8 @@ static struct clk_rcg2 pcie_aux_clk_src = {
        .freq_tbl = ftbl_pcie_aux_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "pcie_aux_clk_src",
-               .parent_names = gcc_xo_sleep_clk,
-               .num_parents = 2,
+               .parent_data = gcc_xo_sleep_clk,
+               .num_parents = ARRAY_SIZE(gcc_xo_sleep_clk),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1220,8 +1193,8 @@ static struct clk_rcg2 ufs_axi_clk_src = {
        .freq_tbl = ftbl_ufs_axi_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "ufs_axi_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1240,8 +1213,8 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
        .freq_tbl = ftbl_ufs_ice_core_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "ufs_ice_core_clk_src",
-               .parent_names = gcc_xo_gpll0,
-               .num_parents = 2,
+               .parent_data = gcc_xo_gpll0,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1257,12 +1230,12 @@ static const struct freq_tbl ftbl_qspi_ser_clk_src[] = {
 static struct clk_rcg2 qspi_ser_clk_src = {
        .cmd_rcgr = 0x8b00c,
        .hid_width = 5,
-       .parent_map = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map,
+       .parent_map = gcc_xo_gpll0_gpll4_gpll0_early_div_map,
        .freq_tbl = ftbl_qspi_ser_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "qspi_ser_clk_src",
-               .parent_names = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div,
-               .num_parents = 6,
+               .parent_data = gcc_xo_gpll0_gpll4_gpll0_early_div,
+               .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_gpll0_early_div),
                .ops = &clk_rcg2_ops,
        },
 };
@@ -1274,7 +1247,9 @@ static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sys_noc_usb3_axi_clk",
-                       .parent_names = (const char *[]){ "usb30_master_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_master_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1289,7 +1264,9 @@ static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sys_noc_ufs_axi_clk",
-                       .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ufs_axi_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1304,7 +1281,9 @@ static struct clk_branch gcc_periph_noc_usb20_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_periph_noc_usb20_ahb_clk",
-                       .parent_names = (const char *[]){ "usb20_master_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb20_master_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1319,7 +1298,9 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mmss_noc_cfg_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
                        .ops = &clk_branch2_ops,
@@ -1347,7 +1328,9 @@ static struct clk_branch gcc_usb30_master_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb30_master_clk",
-                       .parent_names = (const char *[]){ "usb30_master_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_master_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1362,7 +1345,9 @@ static struct clk_branch gcc_usb30_sleep_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb30_sleep_clk",
-                       .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gcc_sleep_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1377,7 +1362,9 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb30_mock_utmi_clk",
-                       .parent_names = (const char *[]){ "usb30_mock_utmi_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_mock_utmi_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1392,7 +1379,9 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb3_phy_aux_clk",
-                       .parent_names = (const char *[]){ "usb3_phy_aux_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb3_phy_aux_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1408,7 +1397,9 @@ static struct clk_branch gcc_usb3_phy_pipe_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb3_phy_pipe_clk",
-                       .parent_names = (const char *[]){ "usb3_phy_pipe_clk_src" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "usb3_phy_pipe_clk_src", .name = "usb3_phy_pipe_clk_src",
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1423,7 +1414,9 @@ static struct clk_branch gcc_usb20_master_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb20_master_clk",
-                       .parent_names = (const char *[]){ "usb20_master_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb20_master_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1438,7 +1431,9 @@ static struct clk_branch gcc_usb20_sleep_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb20_sleep_clk",
-                       .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gcc_sleep_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1453,7 +1448,9 @@ static struct clk_branch gcc_usb20_mock_utmi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb20_mock_utmi_clk",
-                       .parent_names = (const char *[]){ "usb20_mock_utmi_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb20_mock_utmi_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1468,7 +1465,9 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb_phy_cfg_ahb2phy_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1483,7 +1482,9 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc1_apps_clk",
-                       .parent_names = (const char *[]){ "sdcc1_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sdcc1_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1498,7 +1499,9 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc1_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1513,7 +1516,9 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc1_ice_core_clk",
-                       .parent_names = (const char *[]){ "sdcc1_ice_core_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sdcc1_ice_core_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1528,7 +1533,9 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc2_apps_clk",
-                       .parent_names = (const char *[]){ "sdcc2_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sdcc2_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1543,7 +1550,9 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc2_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1558,7 +1567,9 @@ static struct clk_branch gcc_sdcc3_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc3_apps_clk",
-                       .parent_names = (const char *[]){ "sdcc3_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sdcc3_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1573,7 +1584,9 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc3_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1588,7 +1601,9 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc4_apps_clk",
-                       .parent_names = (const char *[]){ "sdcc4_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &sdcc4_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1603,7 +1618,9 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc4_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1619,7 +1636,9 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
                .enable_mask = BIT(17),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1635,7 +1654,9 @@ static struct clk_branch gcc_blsp1_sleep_clk = {
                .enable_mask = BIT(16),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_sleep_clk",
-                       .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gcc_sleep_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1650,7 +1671,9 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup1_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup1_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1665,7 +1688,9 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup1_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup1_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1680,7 +1705,9 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_uart1_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_uart1_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_uart1_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1695,7 +1722,9 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup2_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup2_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1710,7 +1739,9 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup2_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup2_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1725,7 +1756,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_uart2_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_uart2_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_uart2_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1740,7 +1773,9 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup3_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup3_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1755,7 +1790,9 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup3_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup3_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1770,7 +1807,9 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_uart3_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_uart3_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_uart3_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1785,7 +1824,9 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup4_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup4_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1800,7 +1841,9 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup4_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup4_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1815,7 +1858,9 @@ static struct clk_branch gcc_blsp1_uart4_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_uart4_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_uart4_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_uart4_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1830,7 +1875,9 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup5_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup5_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup5_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1845,7 +1892,9 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup5_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup5_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1860,7 +1909,9 @@ static struct clk_branch gcc_blsp1_uart5_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_uart5_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_uart5_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_uart5_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1875,7 +1926,9 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup6_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup6_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup6_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1890,7 +1943,9 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_qup6_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_qup6_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1905,7 +1960,9 @@ static struct clk_branch gcc_blsp1_uart6_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_uart6_apps_clk",
-                       .parent_names = (const char *[]){ "blsp1_uart6_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp1_uart6_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1921,7 +1978,9 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
                .enable_mask = BIT(15),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1937,7 +1996,9 @@ static struct clk_branch gcc_blsp2_sleep_clk = {
                .enable_mask = BIT(14),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_sleep_clk",
-                       .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gcc_sleep_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1952,7 +2013,9 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup1_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup1_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup1_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1967,7 +2030,9 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup1_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup1_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1982,7 +2047,9 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_uart1_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_uart1_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_uart1_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1997,7 +2064,9 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup2_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup2_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup2_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2012,7 +2081,9 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup2_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup2_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2027,7 +2098,9 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_uart2_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_uart2_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_uart2_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2042,7 +2115,9 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup3_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup3_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup3_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2057,7 +2132,9 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup3_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup3_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2072,7 +2149,9 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_uart3_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_uart3_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_uart3_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2087,7 +2166,9 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup4_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup4_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2102,7 +2183,9 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup4_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup4_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2117,7 +2200,9 @@ static struct clk_branch gcc_blsp2_uart4_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_uart4_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_uart4_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_uart4_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2132,7 +2217,9 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup5_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup5_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup5_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2147,7 +2234,9 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup5_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup5_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup5_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2162,7 +2251,9 @@ static struct clk_branch gcc_blsp2_uart5_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_uart5_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_uart5_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_uart5_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2177,7 +2268,9 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup6_spi_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup6_spi_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup6_spi_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2192,7 +2285,9 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_qup6_i2c_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_qup6_i2c_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_qup6_i2c_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2207,7 +2302,9 @@ static struct clk_branch gcc_blsp2_uart6_apps_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_uart6_apps_clk",
-                       .parent_names = (const char *[]){ "blsp2_uart6_apps_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &blsp2_uart6_apps_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2222,7 +2319,9 @@ static struct clk_branch gcc_pdm_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pdm_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2237,7 +2336,9 @@ static struct clk_branch gcc_pdm2_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pdm2_clk",
-                       .parent_names = (const char *[]){ "pdm2_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pdm2_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2253,7 +2354,9 @@ static struct clk_branch gcc_prng_ahb_clk = {
                .enable_mask = BIT(13),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_prng_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2268,7 +2371,9 @@ static struct clk_branch gcc_tsif_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_tsif_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2283,7 +2388,9 @@ static struct clk_branch gcc_tsif_ref_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_tsif_ref_clk",
-                       .parent_names = (const char *[]){ "tsif_ref_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &tsif_ref_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2298,7 +2405,9 @@ static struct clk_branch gcc_tsif_inactivity_timers_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_tsif_inactivity_timers_clk",
-                       .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gcc_sleep_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2314,7 +2423,9 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
                .enable_mask = BIT(10),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_boot_rom_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2342,7 +2453,9 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_hmss_rbcpr_clk",
-                       .parent_names = (const char *[]){ "hmss_rbcpr_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &hmss_rbcpr_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2357,7 +2470,9 @@ static struct clk_branch gcc_gp1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_gp1_clk",
-                       .parent_names = (const char *[]){ "gp1_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gp1_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2372,7 +2487,9 @@ static struct clk_branch gcc_gp2_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_gp2_clk",
-                       .parent_names = (const char *[]){ "gp2_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gp2_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2387,7 +2504,9 @@ static struct clk_branch gcc_gp3_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_gp3_clk",
-                       .parent_names = (const char *[]){ "gp3_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gp3_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2402,7 +2521,9 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_slv_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2417,7 +2538,9 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_mstr_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2432,7 +2555,9 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_cfg_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2447,7 +2572,9 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_aux_clk",
-                       .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pcie_aux_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2463,7 +2590,9 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_pipe_clk",
-                       .parent_names = (const char *[]){ "pcie_0_pipe_clk_src" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "pcie_0_pipe_clk_src", .name = "pcie_0_pipe_clk_src",
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2478,7 +2607,9 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_slv_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2493,7 +2624,9 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_mstr_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2508,7 +2641,9 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_cfg_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2523,7 +2658,9 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_aux_clk",
-                       .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pcie_aux_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2539,7 +2676,9 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_pipe_clk",
-                       .parent_names = (const char *[]){ "pcie_1_pipe_clk_src" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "pcie_1_pipe_clk_src", .name = "pcie_1_pipe_clk_src",
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2554,7 +2693,9 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_2_slv_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2569,7 +2710,9 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_2_mstr_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2584,7 +2727,9 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_2_cfg_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2599,7 +2744,9 @@ static struct clk_branch gcc_pcie_2_aux_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_2_aux_clk",
-                       .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pcie_aux_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2615,7 +2762,9 @@ static struct clk_branch gcc_pcie_2_pipe_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_2_pipe_clk",
-                       .parent_names = (const char *[]){ "pcie_2_pipe_clk_src" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "pcie_2_pipe_clk_src", .name = "pcie_2_pipe_clk_src",
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2630,7 +2779,9 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_phy_cfg_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2645,7 +2796,9 @@ static struct clk_branch gcc_pcie_phy_aux_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_phy_aux_clk",
-                       .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &pcie_aux_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2660,7 +2813,9 @@ static struct clk_branch gcc_ufs_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_axi_clk",
-                       .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ufs_axi_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2675,7 +2830,9 @@ static struct clk_branch gcc_ufs_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2688,7 +2845,9 @@ static struct clk_fixed_factor ufs_tx_cfg_clk_src = {
        .div = 16,
        .hw.init = &(struct clk_init_data){
                .name = "ufs_tx_cfg_clk_src",
-               .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &ufs_axi_clk_src.clkr.hw,
+               },
                .num_parents = 1,
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_fixed_factor_ops,
@@ -2702,7 +2861,9 @@ static struct clk_branch gcc_ufs_tx_cfg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_tx_cfg_clk",
-                       .parent_names = (const char *[]){ "ufs_tx_cfg_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ufs_tx_cfg_clk_src.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2715,7 +2876,9 @@ static struct clk_fixed_factor ufs_rx_cfg_clk_src = {
        .div = 16,
        .hw.init = &(struct clk_init_data){
                .name = "ufs_rx_cfg_clk_src",
-               .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &ufs_axi_clk_src.clkr.hw,
+               },
                .num_parents = 1,
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_fixed_factor_ops,
@@ -2755,7 +2918,9 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_rx_cfg_clk",
-                       .parent_names = (const char *[]){ "ufs_rx_cfg_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ufs_rx_cfg_clk_src.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2771,7 +2936,9 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_tx_symbol_0_clk",
-                       .parent_names = (const char *[]){ "ufs_tx_symbol_0_clk_src" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "ufs_tx_symbol_0_clk_src", .name = "ufs_tx_symbol_0_clk_src",
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2787,7 +2954,9 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_rx_symbol_0_clk",
-                       .parent_names = (const char *[]){ "ufs_rx_symbol_0_clk_src" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "ufs_rx_symbol_0_clk_src", .name = "ufs_rx_symbol_0_clk_src",
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2803,7 +2972,9 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_rx_symbol_1_clk",
-                       .parent_names = (const char *[]){ "ufs_rx_symbol_1_clk_src" },
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "ufs_rx_symbol_1_clk_src", .name = "ufs_rx_symbol_1_clk_src",
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2816,7 +2987,9 @@ static struct clk_fixed_factor ufs_ice_core_postdiv_clk_src = {
        .div = 2,
        .hw.init = &(struct clk_init_data){
                .name = "ufs_ice_core_postdiv_clk_src",
-               .parent_names = (const char *[]){ "ufs_ice_core_clk_src" },
+               .parent_hws = (const struct clk_hw*[]){
+                       &ufs_ice_core_clk_src.clkr.hw,
+               },
                .num_parents = 1,
                .flags = CLK_SET_RATE_PARENT,
                .ops = &clk_fixed_factor_ops,
@@ -2830,7 +3003,9 @@ static struct clk_branch gcc_ufs_unipro_core_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_unipro_core_clk",
-                       .parent_names = (const char *[]){ "ufs_ice_core_postdiv_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ufs_ice_core_postdiv_clk_src.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2845,7 +3020,9 @@ static struct clk_branch gcc_ufs_ice_core_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_ice_core_clk",
-                       .parent_names = (const char *[]){ "ufs_ice_core_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ufs_ice_core_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2884,7 +3061,9 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_aggre0_snoc_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
                        .ops = &clk_branch2_ops,
@@ -2899,7 +3078,9 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_aggre0_cnoc_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
                        .ops = &clk_branch2_ops,
@@ -2914,7 +3095,9 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_smmu_aggre0_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
                        .ops = &clk_branch2_ops,
@@ -2929,7 +3112,9 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_smmu_aggre0_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
                        .ops = &clk_branch2_ops,
@@ -2944,7 +3129,9 @@ static struct clk_branch gcc_aggre2_ufs_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_aggre2_ufs_axi_clk",
-                       .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &ufs_axi_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2959,7 +3146,9 @@ static struct clk_branch gcc_aggre2_usb3_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_aggre2_usb3_axi_clk",
-                       .parent_names = (const char *[]){ "usb30_master_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &usb30_master_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -2974,7 +3163,9 @@ static struct clk_branch gcc_dcc_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_dcc_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -2988,7 +3179,9 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3002,7 +3195,9 @@ static struct clk_branch gcc_qspi_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_qspi_ahb_clk",
-                       .parent_names = (const char *[]){ "periph_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &periph_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -3017,7 +3212,9 @@ static struct clk_branch gcc_qspi_ser_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_qspi_ser_clk",
-                       .parent_names = (const char *[]){ "qspi_ser_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &qspi_ser_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -3151,7 +3348,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mss_cfg_ahb_clk",
-                       .parent_names = (const char *[]){ "config_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &config_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3165,7 +3364,9 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mss_mnoc_bimc_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3179,7 +3380,9 @@ static struct clk_branch gcc_mss_snoc_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mss_snoc_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
@@ -3193,7 +3396,9 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mss_q6_bimc_axi_clk",
-                       .parent_names = (const char *[]){ "system_noc_clk_src" },
+                       .parent_hws = (const struct clk_hw*[]){
+                               &system_noc_clk_src.clkr.hw,
+                       },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
index 431b55b..cf3af88 100644 (file)
@@ -4151,7 +4151,7 @@ static int gcc_sm6125_probe(struct platform_device *pdev)
 
        /*
         * Enable DUAL_EDGE mode for MCLK RCGs
-        * This is requierd to enable MND divider mode
+        * This is required to enable MND divider mode
         */
        regmap_update_bits(regmap, 0x51004, 0x3000, 0x2000);
        regmap_update_bits(regmap, 0x51020, 0x3000, 0x2000);
index 2457944..09cf827 100644 (file)
@@ -3448,22 +3448,67 @@ static struct clk_branch gcc_video_xo_clk = {
        },
 };
 
+static struct gdsc pcie_0_gdsc = {
+       .gdscr = 0x6b004,
+       .pd = {
+               .name = "pcie_0_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_1_gdsc = {
+       .gdscr = 0x8d004,
+       .pd = {
+               .name = "pcie_1_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_card_gdsc = {
+       .gdscr = 0x75004,
+       .pd = {
+               .name = "ufs_card_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+       .gdscr = 0x77004,
+       .pd = {
+               .name = "ufs_phy_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc emac_gdsc = {
+       .gdscr = 0x6004,
+       .pd = {
+               .name = "emac_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = POLL_CFG_GDSCR,
+};
+
 static struct gdsc usb30_prim_gdsc = {
-               .gdscr = 0xf004,
-               .pd = {
-                       .name = "usb30_prim_gdsc",
-               },
-               .pwrsts = PWRSTS_OFF_ON,
-               .flags = POLL_CFG_GDSCR,
+       .gdscr = 0xf004,
+       .pd = {
+               .name = "usb30_prim_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = POLL_CFG_GDSCR,
 };
 
 static struct gdsc usb30_sec_gdsc = {
-               .gdscr = 0x10004,
-               .pd = {
-                       .name = "usb30_sec_gdsc",
-               },
-               .pwrsts = PWRSTS_OFF_ON,
-               .flags = POLL_CFG_GDSCR,
+       .gdscr = 0x10004,
+       .pd = {
+               .name = "usb30_sec_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = POLL_CFG_GDSCR,
 };
 
 static struct clk_regmap *gcc_sm8150_clocks[] = {
@@ -3714,6 +3759,11 @@ static const struct qcom_reset_map gcc_sm8150_resets[] = {
 };
 
 static struct gdsc *gcc_sm8150_gdscs[] = {
+       [EMAC_GDSC] = &emac_gdsc,
+       [PCIE_0_GDSC] = &pcie_0_gdsc,
+       [PCIE_1_GDSC] = &pcie_1_gdsc,
+       [UFS_CARD_GDSC] = &ufs_card_gdsc,
+       [UFS_PHY_GDSC] = &ufs_phy_gdsc,
        [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
        [USB30_SEC_GDSC] = &usb30_sec_gdsc,
 };
index 41bba96..d6b38a0 100644 (file)
@@ -29,7 +29,6 @@
 
 enum {
        P_GPU_XO,
-       P_CORE_BI_PLL_TEST_SE,
        P_GPLL0_OUT_MAIN,
        P_GPLL0_OUT_MAIN_DIV,
        P_GPU_PLL0_PLL_OUT_MAIN,
@@ -66,8 +65,8 @@ static struct clk_alpha_pll gpu_pll0_pll_out_main = {
        .num_vco = ARRAY_SIZE(gpu_vco),
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gpu_pll0_pll_out_main",
-               .parent_data =  &(const struct clk_parent_data){
-                       .hw = &gpucc_cxo_clk.clkr.hw,
+               .parent_hws = (const struct clk_hw*[]){
+                       &gpucc_cxo_clk.clkr.hw,
                },
                .num_parents = 1,
                .ops = &clk_alpha_pll_ops,
@@ -81,8 +80,8 @@ static struct clk_alpha_pll gpu_pll1_pll_out_main = {
        .num_vco = ARRAY_SIZE(gpu_vco),
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gpu_pll1_pll_out_main",
-               .parent_data = &(const struct clk_parent_data){
-                       .hw = &gpucc_cxo_clk.clkr.hw,
+               .parent_hws = (const struct clk_hw*[]){
+                       &gpucc_cxo_clk.clkr.hw,
                },
                .num_parents = 1,
                .ops = &clk_alpha_pll_ops,
@@ -135,8 +134,8 @@ static struct clk_branch gpucc_gfx3d_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gpucc_gfx3d_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &gfx3d_clk_src.rcg.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gfx3d_clk_src.rcg.clkr.hw,
                        },
                        .num_parents = 1,
                        .ops = &clk_branch2_ops,
@@ -204,8 +203,8 @@ static struct clk_branch gpucc_rbbmtimer_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gpucc_rbbmtimer_clk",
-                       .parent_names = (const char *[]){
-                               "rbbmtimer_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &rbbmtimer_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -222,8 +221,8 @@ static struct clk_branch gpucc_rbcpr_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gpucc_rbcpr_clk",
-                       .parent_names = (const char *[]){
-                               "rbcpr_clk_src",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &rbcpr_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
new file mode 100644 (file)
index 0000000..ef15185
--- /dev/null
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm6350.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK         0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT                4
+#define CX_GMU_CBCR_WAKE_MASK          0xF
+#define CX_GMU_CBCR_WAKE_SHIFT         8
+
+enum {
+       P_BI_TCXO,
+       P_GPLL0_OUT_MAIN,
+       P_GPLL0_OUT_MAIN_DIV,
+       P_GPU_CC_PLL0_OUT_MAIN,
+       P_GPU_CC_PLL0_OUT_ODD,
+       P_GPU_CC_PLL1_OUT_EVEN,
+       P_GPU_CC_PLL1_OUT_MAIN,
+       P_GPU_CC_PLL1_OUT_ODD,
+       P_CRC_DIV,
+};
+
+static const struct pll_vco fabia_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+/* 506MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+       .l = 0x1A,
+       .alpha = 0x5AAA,
+       .config_ctl_val = 0x20485699,
+       .config_ctl_hi_val = 0x00002067,
+       .test_ctl_val = 0x40000000,
+       .test_ctl_hi_val = 0x00000002,
+       .user_ctl_val = 0x00000001,
+       .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+       .offset = 0x0,
+       .vco_table = fabia_vco,
+       .num_vco = ARRAY_SIZE(fabia_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_pll0",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fabia_ops,
+               },
+       },
+};
+
+static struct clk_fixed_factor crc_div = {
+       .mult = 1,
+       .div = 2,
+       .hw.init = &(struct clk_init_data){
+               .name = "crc_div",
+               .parent_hws = (const struct clk_hw*[]){
+                       &gpu_cc_pll0.clkr.hw,
+               },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_fixed_factor_ops,
+       },
+};
+
+/* 514MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+       .l = 0x1A,
+       .alpha = 0xC555,
+       .config_ctl_val = 0x20485699,
+       .config_ctl_hi_val = 0x00002067,
+       .test_ctl_val = 0x40000000,
+       .test_ctl_hi_val = 0x00000002,
+       .user_ctl_val = 0x00000001,
+       .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+       .offset = 0x100,
+       .vco_table = fabia_vco,
+       .num_vco = ARRAY_SIZE(fabia_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_pll1",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fabia_ops,
+               },
+       },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+       { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+       { P_GPLL0_OUT_MAIN, 5 },
+       { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &gpu_cc_pll0.clkr.hw },
+       { .hw = &gpu_cc_pll1.clkr.hw },
+       { .fw_name = "gcc_gpu_gpll0_clk" },
+       { .fw_name = "gcc_gpu_gpll0_div_clk" },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+       { P_BI_TCXO, 0 },
+       { P_CRC_DIV, 1 },
+       { P_GPU_CC_PLL0_OUT_ODD, 2 },
+       { P_GPU_CC_PLL1_OUT_EVEN, 3 },
+       { P_GPU_CC_PLL1_OUT_ODD, 4 },
+       { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &crc_div.hw },
+       { .hw = &gpu_cc_pll0.clkr.hw },
+       { .hw = &gpu_cc_pll1.clkr.hw },
+       { .hw = &gpu_cc_pll1.clkr.hw },
+       { .fw_name = "gcc_gpu_gpll0_clk" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+       F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+       .cmd_rcgr = 0x1120,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = gpu_cc_parent_map_0,
+       .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpu_cc_gmu_clk_src",
+               .parent_data = gpu_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+       F(253000000, P_CRC_DIV, 1, 0, 0),
+       F(355000000, P_CRC_DIV, 1, 0, 0),
+       F(430000000, P_CRC_DIV, 1, 0, 0),
+       F(565000000, P_CRC_DIV, 1, 0, 0),
+       F(650000000, P_CRC_DIV, 1, 0, 0),
+       F(800000000, P_CRC_DIV, 1, 0, 0),
+       F(825000000, P_CRC_DIV, 1, 0, 0),
+       F(850000000, P_CRC_DIV, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+       .cmd_rcgr = 0x101c,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = gpu_cc_parent_map_1,
+       .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpu_cc_gx_gfx3d_clk_src",
+               .parent_data = gpu_cc_parent_data_1,
+               .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gpu_cc_acd_ahb_clk = {
+       .halt_reg = 0x1168,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1168,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_acd_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_acd_cxo_clk = {
+       .halt_reg = 0x1164,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1164,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_acd_cxo_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+       .halt_reg = 0x1078,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x1078,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_ahb_clk",
+                       .flags = CLK_IS_CRITICAL,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+       .halt_reg = 0x107c,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x107c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_crc_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+       .halt_reg = 0x10a4,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x10a4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_gfx3d_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+       .halt_reg = 0x10a8,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x10a8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_gfx3d_slv_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+       .halt_reg = 0x1098,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1098,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_gmu_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gpu_cc_gmu_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+       .halt_reg = 0x108c,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x108c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_snoc_dvm_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+       .halt_reg = 0x1004,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x1004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_aon_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+       .halt_reg = 0x109c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x109c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+       .halt_reg = 0x1060,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1060,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_gx_cxo_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+       .halt_reg = 0x1054,
+       .halt_check = BRANCH_HALT_SKIP,
+       .clkr = {
+               .enable_reg = 0x1054,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_gx_gfx3d_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+       .halt_reg = 0x1064,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1064,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_gx_gmu_clk",
+                       .parent_hws = (const struct clk_hw*[]){
+                               &gpu_cc_gmu_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+       .halt_reg = 0x1058,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x1058,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_gx_vsense_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+       .gdscr = 0x106c,
+       .gds_hw_ctrl = 0x1540,
+       .pd = {
+               .name = "gpu_cx_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+       .gdscr = 0x100c,
+       .clamp_io_ctrl = 0x1508,
+       .pd = {
+               .name = "gpu_gx_gdsc",
+               .power_on = gdsc_gx_do_nothing_enable,
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = CLAMP_IO | POLL_CFG_GDSCR,
+};
+
+static struct clk_hw *gpu_cc_sm6350_hws[] = {
+       [GPU_CC_CRC_DIV] = &crc_div.hw,
+};
+
+static struct clk_regmap *gpu_cc_sm6350_clocks[] = {
+       [GPU_CC_ACD_AHB_CLK] = &gpu_cc_acd_ahb_clk.clkr,
+       [GPU_CC_ACD_CXO_CLK] = &gpu_cc_acd_cxo_clk.clkr,
+       [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+       [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+       [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+       [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+       [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+       [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+       [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+       [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+       [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+       [GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+       [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+       [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+       [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+       [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+       [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+       [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+};
+
+static struct gdsc *gpu_cc_sm6350_gdscs[] = {
+       [GPU_CX_GDSC] = &gpu_cx_gdsc,
+       [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm6350_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = 0x8008,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm6350_desc = {
+       .config = &gpu_cc_sm6350_regmap_config,
+       .clk_hws = gpu_cc_sm6350_hws,
+       .num_clk_hws = ARRAY_SIZE(gpu_cc_sm6350_hws),
+       .clks = gpu_cc_sm6350_clocks,
+       .num_clks = ARRAY_SIZE(gpu_cc_sm6350_clocks),
+       .gdscs = gpu_cc_sm6350_gdscs,
+       .num_gdscs = ARRAY_SIZE(gpu_cc_sm6350_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm6350_match_table[] = {
+       { .compatible = "qcom,sm6350-gpucc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm6350_match_table);
+
+static int gpu_cc_sm6350_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+       unsigned int value, mask;
+
+       regmap = qcom_cc_map(pdev, &gpu_cc_sm6350_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       clk_fabia_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+       clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+       /* Configure gpu_cc_cx_gmu_clk with recommended wakeup/sleep settings */
+       mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+       mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+       value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+       regmap_update_bits(regmap, 0x1098, mask, value);
+
+       return qcom_cc_really_probe(pdev, &gpu_cc_sm6350_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm6350_driver = {
+       .probe = gpu_cc_sm6350_probe,
+       .driver = {
+               .name = "sm6350-gpucc",
+               .of_match_table = gpu_cc_sm6350_match_table,
+       },
+};
+
+static int __init gpu_cc_sm6350_init(void)
+{
+       return platform_driver_register(&gpu_cc_sm6350_driver);
+}
+core_initcall(gpu_cc_sm6350_init);
+
+static void __exit gpu_cc_sm6350_exit(void)
+{
+       platform_driver_unregister(&gpu_cc_sm6350_driver);
+}
+module_exit(gpu_cc_sm6350_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC LAGOON Driver");
+MODULE_LICENSE("GPL v2");
index 4fec1f9..88d4b33 100644 (file)
@@ -17,7 +17,7 @@ static const char *aux_parents[] = {
        "pxo",
 };
 
-static unsigned int aux_parent_map[] = {
+static const u32 aux_parent_map[] = {
        3,
        0,
 };
index a1552b6..f746629 100644 (file)
@@ -257,6 +257,18 @@ static struct clk_rcg2 mmss_ahb_clk_src = {
        },
 };
 
+static struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       F(266666666, P_MMPLL0, 3, 0, 0),
+       { }
+};
+
 static struct freq_tbl ftbl_mmss_axi_clk[] = {
        F( 19200000, P_XO, 1, 0, 0),
        F( 37500000, P_GPLL0, 16, 0, 0),
@@ -364,6 +376,23 @@ static struct clk_rcg2 csi3_clk_src = {
        },
 };
 
+static struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = {
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(60000000, P_GPLL0, 10, 0, 0),
+       F(80000000, P_GPLL0, 7.5, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(109090000, P_GPLL0, 5.5, 0, 0),
+       F(133330000, P_GPLL0, 4.5, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(228570000, P_MMPLL0, 3.5, 0, 0),
+       F(266670000, P_MMPLL0, 3, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       F(400000000, P_MMPLL0, 2, 0, 0),
+       { }
+};
+
 static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
        F(37500000, P_GPLL0, 16, 0, 0),
        F(50000000, P_GPLL0, 12, 0, 0),
@@ -407,6 +436,18 @@ static struct clk_rcg2 vfe1_clk_src = {
        },
 };
 
+static struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = {
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(60000000, P_GPLL0, 10, 0, 0),
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(92310000, P_GPLL0, 6.5, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(133330000, P_MMPLL0, 6, 0, 0),
+       F(177780000, P_MMPLL0, 4.5, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       { }
+};
+
 static struct freq_tbl ftbl_mdss_mdp_clk[] = {
        F(37500000, P_GPLL0, 16, 0, 0),
        F(60000000, P_GPLL0, 10, 0, 0),
@@ -513,6 +554,14 @@ static struct clk_rcg2 pclk1_clk_src = {
        },
 };
 
+static struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = {
+       F(66700000, P_GPLL0, 9, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(133330000, P_MMPLL0, 6, 0, 0),
+       F(160000000, P_MMPLL0, 5, 0, 0),
+       { }
+};
+
 static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
        F(50000000, P_GPLL0, 12, 0, 0),
        F(100000000, P_GPLL0, 6, 0, 0),
@@ -593,6 +642,13 @@ static struct clk_rcg2 camss_gp1_clk_src = {
        },
 };
 
+static struct freq_tbl ftbl_camss_mclk0_3_clk_msm8226[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(24000000, P_GPLL0, 5, 1, 5),
+       F(66670000, P_GPLL0, 9, 0, 0),
+       { }
+};
+
 static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
        F(4800000, P_XO, 4, 0, 0),
        F(6000000, P_GPLL0, 10, 1, 10),
@@ -705,6 +761,15 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
        },
 };
 
+static struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = {
+       F(133330000, P_GPLL0, 4.5, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(266670000, P_MMPLL0, 3, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       F(400000000, P_MMPLL0, 2, 0, 0),
+       { }
+};
+
 static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
        F(133330000, P_GPLL0, 4.5, 0, 0),
        F(266670000, P_MMPLL0, 3, 0, 0),
@@ -2366,6 +2431,116 @@ static struct gdsc oxilicx_gdsc = {
        .pwrsts = PWRSTS_OFF_ON,
 };
 
+static struct clk_regmap *mmcc_msm8226_clocks[] = {
+       [MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
+       [MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
+       [MMPLL0] = &mmpll0.clkr,
+       [MMPLL0_VOTE] = &mmpll0_vote,
+       [MMPLL1] = &mmpll1.clkr,
+       [MMPLL1_VOTE] = &mmpll1_vote,
+       [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+       [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+       [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+       [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+       [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+       [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+       [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+       [CCI_CLK_SRC] = &cci_clk_src.clkr,
+       [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+       [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+       [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+       [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+       [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+       [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+       [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+       [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+       [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+       [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+       [CAMSS_CCI_CCI_AHB_CLK] = &camss_cci_cci_ahb_clk.clkr,
+       [CAMSS_CCI_CCI_CLK] = &camss_cci_cci_clk.clkr,
+       [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+       [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+       [CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
+       [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+       [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+       [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+       [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+       [CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
+       [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+       [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+       [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+       [CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+       [CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+       [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+       [CAMSS_JPEG_JPEG0_CLK] = &camss_jpeg_jpeg0_clk.clkr,
+       [CAMSS_JPEG_JPEG_AHB_CLK] = &camss_jpeg_jpeg_ahb_clk.clkr,
+       [CAMSS_JPEG_JPEG_AXI_CLK] = &camss_jpeg_jpeg_axi_clk.clkr,
+       [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+       [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+       [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+       [CAMSS_PHY0_CSI0PHYTIMER_CLK] = &camss_phy0_csi0phytimer_clk.clkr,
+       [CAMSS_PHY1_CSI1PHYTIMER_CLK] = &camss_phy1_csi1phytimer_clk.clkr,
+       [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+       [CAMSS_VFE_CPP_AHB_CLK] = &camss_vfe_cpp_ahb_clk.clkr,
+       [CAMSS_VFE_CPP_CLK] = &camss_vfe_cpp_clk.clkr,
+       [CAMSS_VFE_VFE0_CLK] = &camss_vfe_vfe0_clk.clkr,
+       [CAMSS_VFE_VFE_AHB_CLK] = &camss_vfe_vfe_ahb_clk.clkr,
+       [CAMSS_VFE_VFE_AXI_CLK] = &camss_vfe_vfe_axi_clk.clkr,
+       [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+       [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+       [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+       [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+       [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+       [MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+       [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+       [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+       [MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
+       [MMSS_MMSSNOC_AHB_CLK] = &mmss_mmssnoc_ahb_clk.clkr,
+       [MMSS_MMSSNOC_BTO_AHB_CLK] = &mmss_mmssnoc_bto_ahb_clk.clkr,
+       [MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
+       [MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+       [OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
+       [OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
+       [OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+       [OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+       [OXILICX_AXI_CLK] = &oxilicx_axi_clk.clkr,
+       [VENUS0_AHB_CLK] = &venus0_ahb_clk.clkr,
+       [VENUS0_AXI_CLK] = &venus0_axi_clk.clkr,
+       [VENUS0_VCODEC0_CLK] = &venus0_vcodec0_clk.clkr,
+};
+
+static const struct qcom_reset_map mmcc_msm8226_resets[] = {
+       [SPDM_RESET] = { 0x0200 },
+       [SPDM_RM_RESET] = { 0x0300 },
+       [VENUS0_RESET] = { 0x1020 },
+       [MDSS_RESET] = { 0x2300 },
+};
+
+static struct gdsc *mmcc_msm8226_gdscs[] = {
+       [VENUS0_GDSC] = &venus0_gdsc,
+       [MDSS_GDSC] = &mdss_gdsc,
+       [CAMSS_JPEG_GDSC] = &camss_jpeg_gdsc,
+       [CAMSS_VFE_GDSC] = &camss_vfe_gdsc,
+};
+
+static const struct regmap_config mmcc_msm8226_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x5104,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc mmcc_msm8226_desc = {
+       .config = &mmcc_msm8226_regmap_config,
+       .clks = mmcc_msm8226_clocks,
+       .num_clks = ARRAY_SIZE(mmcc_msm8226_clocks),
+       .resets = mmcc_msm8226_resets,
+       .num_resets = ARRAY_SIZE(mmcc_msm8226_resets),
+       .gdscs = mmcc_msm8226_gdscs,
+       .num_gdscs = ARRAY_SIZE(mmcc_msm8226_gdscs),
+};
+
 static struct clk_regmap *mmcc_msm8974_clocks[] = {
        [MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
        [MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
@@ -2569,23 +2744,44 @@ static const struct qcom_cc_desc mmcc_msm8974_desc = {
 };
 
 static const struct of_device_id mmcc_msm8974_match_table[] = {
-       { .compatible = "qcom,mmcc-msm8974" },
+       { .compatible = "qcom,mmcc-msm8226", .data = &mmcc_msm8226_desc },
+       { .compatible = "qcom,mmcc-msm8974", .data = &mmcc_msm8974_desc },
        { }
 };
 MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table);
 
+static void msm8226_clock_override(void)
+{
+       mmss_axi_clk_src.freq_tbl = ftbl_mmss_axi_clk_msm8226;
+       vfe0_clk_src.freq_tbl = ftbl_camss_vfe_vfe0_clk_msm8226;
+       mdp_clk_src.freq_tbl = ftbl_mdss_mdp_clk_msm8226;
+       vcodec0_clk_src.freq_tbl = ftbl_venus0_vcodec0_clk_msm8226;
+       mclk0_clk_src.freq_tbl = ftbl_camss_mclk0_3_clk_msm8226;
+       mclk1_clk_src.freq_tbl = ftbl_camss_mclk0_3_clk_msm8226;
+       cpp_clk_src.freq_tbl = ftbl_camss_vfe_cpp_clk_msm8226;
+}
+
 static int mmcc_msm8974_probe(struct platform_device *pdev)
 {
        struct regmap *regmap;
+       const struct qcom_cc_desc *desc;
+
+       desc = of_device_get_match_data(&pdev->dev);
+       if (!desc)
+               return -EINVAL;
 
-       regmap = qcom_cc_map(pdev, &mmcc_msm8974_desc);
+       regmap = qcom_cc_map(pdev, desc);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
 
-       clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
-       clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
+       if (desc == &mmcc_msm8974_desc) {
+               clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
+               clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
+       } else {
+               msm8226_clock_override();
+       }
 
-       return qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
+       return qcom_cc_really_probe(pdev, desc, regmap);
 }
 
 static struct platform_driver mmcc_msm8974_driver = {
index ed57bbb..5b9b54f 100644 (file)
@@ -99,8 +99,8 @@ static struct clk_branch video_cc_vcodec0_core_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "video_cc_vcodec0_core_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &video_cc_venus_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &video_cc_venus_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
@@ -143,8 +143,8 @@ static struct clk_branch video_cc_venus_ctl_core_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "video_cc_venus_ctl_core_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &video_cc_venus_clk_src.clkr.hw,
+                       .parent_hws = (const struct clk_hw*[]){
+                               &video_cc_venus_clk_src.clkr.hw,
                        },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
index be6e6ae..c281f3a 100644 (file)
@@ -34,6 +34,7 @@ config CLK_RENESAS
        select CLK_R8A779F0 if ARCH_R8A779F0
        select CLK_R9A06G032 if ARCH_R9A06G032
        select CLK_R9A07G044 if ARCH_R9A07G044
+       select CLK_R9A07G054 if ARCH_R9A07G054
        select CLK_SH73A0 if ARCH_SH73A0
 
 if CLK_RENESAS
@@ -163,6 +164,10 @@ config CLK_R9A07G044
        bool "RZ/G2L clock support" if COMPILE_TEST
        select CLK_RZG2L
 
+config CLK_R9A07G054
+       bool "RZ/V2L clock support" if COMPILE_TEST
+       select CLK_RZG2L
+
 config CLK_SH73A0
        bool "SH-Mobile AG5 clock support" if COMPILE_TEST
        select CLK_RENESAS_CPG_MSTP
@@ -195,7 +200,7 @@ config CLK_RCAR_USB2_CLOCK_SEL
          This is a driver for R-Car USB2 clock selector
 
 config CLK_RZG2L
-       bool "Renesas RZ/G2L family clock support" if COMPILE_TEST
+       bool "Renesas RZ/{G2L,V2L} family clock support" if COMPILE_TEST
        select RESET_CONTROLLER
 
 # Generic
index 8b34db1..d5e5716 100644 (file)
@@ -31,6 +31,7 @@ obj-$(CONFIG_CLK_R8A779A0)            += r8a779a0-cpg-mssr.o
 obj-$(CONFIG_CLK_R8A779F0)             += r8a779f0-cpg-mssr.o
 obj-$(CONFIG_CLK_R9A06G032)            += r9a06g032-clocks.o
 obj-$(CONFIG_CLK_R9A07G044)            += r9a07g044-cpg.o
+obj-$(CONFIG_CLK_R9A07G054)            += r9a07g044-cpg.o
 obj-$(CONFIG_CLK_SH73A0)               += clk-sh73a0.o
 
 # Family
index faf60f7..d34d97b 100644 (file)
@@ -200,6 +200,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
        DEF_MOD("du0",                   724,   R8A77990_CLK_S1D1),
        DEF_MOD("lvds",                  727,   R8A77990_CLK_S2D1),
 
+       DEF_MOD("mlp",                   802,   R8A77990_CLK_S2D1),
        DEF_MOD("vin5",                  806,   R8A77990_CLK_S1D2),
        DEF_MOD("vin4",                  807,   R8A77990_CLK_S1D2),
        DEF_MOD("etheravb",              812,   R8A77990_CLK_S3D2),
index 7713cfd..525eef1 100644 (file)
@@ -160,6 +160,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
        DEF_MOD("du1",                   723,   R8A77995_CLK_S1D1),
        DEF_MOD("du0",                   724,   R8A77995_CLK_S1D1),
        DEF_MOD("lvds",                  727,   R8A77995_CLK_S2D1),
+       DEF_MOD("mlp",                   802,   R8A77995_CLK_S2D1),
        DEF_MOD("vin4",                  807,   R8A77995_CLK_S1D2),
        DEF_MOD("etheravb",              812,   R8A77995_CLK_S3D2),
        DEF_MOD("imr0",                  823,   R8A77995_CLK_S1D2),
index 1c09d4e..fadd8a1 100644 (file)
@@ -136,6 +136,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
        DEF_MOD("avb3",         214,    R8A779A0_CLK_S3D2),
        DEF_MOD("avb4",         215,    R8A779A0_CLK_S3D2),
        DEF_MOD("avb5",         216,    R8A779A0_CLK_S3D2),
+       DEF_MOD("canfd0",       328,    R8A779A0_CLK_CANFD),
        DEF_MOD("csi40",        331,    R8A779A0_CLK_CSI0),
        DEF_MOD("csi41",        400,    R8A779A0_CLK_CSI0),
        DEF_MOD("csi42",        401,    R8A779A0_CLK_CSI0),
index e6ec02c..76b4419 100644 (file)
@@ -103,7 +103,7 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
        DEF_FIXED("s0d12_hsc",  R8A779F0_CLK_S0D12_HSC, CLK_S0,         12, 1),
        DEF_FIXED("cl16m_hsc",  R8A779F0_CLK_CL16M_HSC, CLK_S0,         48, 1),
        DEF_FIXED("s0d2_cc",    R8A779F0_CLK_S0D2_CC,   CLK_S0,         2, 1),
-       DEF_FIXED("rsw2",       R8A779F0_CLK_RSW2,      CLK_PLL5,       2, 1),
+       DEF_FIXED("rsw2",       R8A779F0_CLK_RSW2,      CLK_PLL5_DIV2,  5, 1),
        DEF_FIXED("cbfusa",     R8A779F0_CLK_CBFUSA,    CLK_EXTAL,      2, 1),
        DEF_FIXED("cpex",       R8A779F0_CLK_CPEX,      CLK_EXTAL,      2, 1),
 
@@ -115,10 +115,24 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
 };
 
 static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+       DEF_MOD("i2c0",         518,    R8A779F0_CLK_S0D6_PER),
+       DEF_MOD("i2c1",         519,    R8A779F0_CLK_S0D6_PER),
+       DEF_MOD("i2c2",         520,    R8A779F0_CLK_S0D6_PER),
+       DEF_MOD("i2c3",         521,    R8A779F0_CLK_S0D6_PER),
+       DEF_MOD("i2c4",         522,    R8A779F0_CLK_S0D6_PER),
+       DEF_MOD("i2c5",         523,    R8A779F0_CLK_S0D6_PER),
        DEF_MOD("scif0",        702,    R8A779F0_CLK_S0D12_PER),
        DEF_MOD("scif1",        703,    R8A779F0_CLK_S0D12_PER),
        DEF_MOD("scif3",        704,    R8A779F0_CLK_S0D12_PER),
        DEF_MOD("scif4",        705,    R8A779F0_CLK_S0D12_PER),
+       DEF_MOD("sys-dmac0",    709,    R8A779F0_CLK_S0D3_PER),
+       DEF_MOD("sys-dmac1",    710,    R8A779F0_CLK_S0D3_PER),
+       DEF_MOD("wdt",          907,    R8A779F0_CLK_R),
+       DEF_MOD("pfc0",         915,    R8A779F0_CLK_CL16M),
+};
+
+static const unsigned int r8a779f0_crit_mod_clks[] __initconst = {
+       MOD_CLK_ID(907),        /* WDT */
 };
 
 /*
@@ -175,6 +189,10 @@ const struct cpg_mssr_info r8a779f0_cpg_mssr_info __initconst = {
        .num_mod_clks = ARRAY_SIZE(r8a779f0_mod_clks),
        .num_hw_mod_clks = 28 * 32,
 
+       /* Critical Module Clocks */
+       .crit_mod_clks = r8a779f0_crit_mod_clks,
+       .num_crit_mod_clks = ARRAY_SIZE(r8a779f0_crit_mod_clks),
+
        /* Callbacks */
        .init = r8a779f0_cpg_mssr_init,
        .cpg_clk_register = rcar_gen4_cpg_clk_register,
index 79042bf..bdfabb9 100644 (file)
 #include <linux/kernel.h>
 
 #include <dt-bindings/clock/r9a07g044-cpg.h>
+#include <dt-bindings/clock/r9a07g054-cpg.h>
 
 #include "rzg2l-cpg.h"
 
 enum clk_ids {
        /* Core Clock Outputs exported to DT */
-       LAST_DT_CORE_CLK = R9A07G044_CLK_P0_DIV2,
+       LAST_DT_CORE_CLK = R9A07G054_CLK_DRP_A,
 
        /* External Input Clocks */
        CLK_EXTAL,
@@ -80,200 +81,222 @@ static const char * const sel_pll6_2[]    = { ".pll6_250", ".pll5_250" };
 static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
 static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" };
 
-static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
-       /* External Clock Inputs */
-       DEF_INPUT("extal", CLK_EXTAL),
+static const struct {
+       struct cpg_core_clk common[44];
+#ifdef CONFIG_CLK_R9A07G054
+       struct cpg_core_clk drp[0];
+#endif
+} core_clks __initconst = {
+       .common = {
+               /* External Clock Inputs */
+               DEF_INPUT("extal", CLK_EXTAL),
 
-       /* Internal Core Clocks */
-       DEF_FIXED(".osc", R9A07G044_OSCCLK, CLK_EXTAL, 1, 1),
-       DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
-       DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)),
-       DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 133, 2),
-       DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 133, 2),
-       DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4),
-       DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
+               /* Internal Core Clocks */
+               DEF_FIXED(".osc", R9A07G044_OSCCLK, CLK_EXTAL, 1, 1),
+               DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
+               DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)),
+               DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
+               DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
+               DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4),
+               DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
 
-       DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
-       DEF_FIXED(".pll5_fout3", CLK_PLL5_FOUT3, CLK_PLL5, 1, 6),
+               DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
+               DEF_FIXED(".pll5_fout3", CLK_PLL5_FOUT3, CLK_PLL5, 1, 6),
 
-       DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
+               DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
 
-       DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2),
-       DEF_FIXED(".clk_800", CLK_PLL2_800, CLK_PLL2, 1, 2),
-       DEF_FIXED(".clk_533", CLK_PLL2_SDHI_533, CLK_PLL2, 1, 3),
-       DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2),
-       DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2),
+               DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2),
+               DEF_FIXED(".clk_800", CLK_PLL2_800, CLK_PLL2, 1, 2),
+               DEF_FIXED(".clk_533", CLK_PLL2_SDHI_533, CLK_PLL2, 1, 3),
+               DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2),
+               DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2),
 
-       DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8),
-       DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10),
+               DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8),
+               DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10),
 
-       DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
-       DEF_FIXED(".pll3_div2_2", CLK_PLL3_DIV2_2, CLK_PLL3_DIV2, 1, 2),
-       DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
-       DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
-       DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3,
-               sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY),
-       DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3,
-               DIVPL3C, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+               DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
+               DEF_FIXED(".pll3_div2_2", CLK_PLL3_DIV2_2, CLK_PLL3_DIV2, 1, 2),
+               DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
+               DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
+               DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3,
+                       sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY),
+               DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3,
+                       DIVPL3C, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
 
-       DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2),
-       DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
-       DEF_MUX(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2,
-               sel_gpu2, ARRAY_SIZE(sel_gpu2), 0, CLK_MUX_READ_ONLY),
+               DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2),
+               DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
+               DEF_MUX(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2,
+                       sel_gpu2, ARRAY_SIZE(sel_gpu2), 0, CLK_MUX_READ_ONLY),
 
-       /* Core output clk */
-       DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8,
-               CLK_DIVIDER_HIWORD_MASK),
-       DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A,
-               dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
-       DEF_FIXED("P0_DIV2", R9A07G044_CLK_P0_DIV2, R9A07G044_CLK_P0, 1, 2),
-       DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1),
-       DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
-               DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
-       DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
-       DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
-               DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
-       DEF_FIXED("M0", R9A07G044_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
-       DEF_FIXED("ZT", R9A07G044_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
-       DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2,
-               sel_pll6_2, ARRAY_SIZE(sel_pll6_2), 0, CLK_MUX_HIWORD_MASK),
-       DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
-       DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
-       DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0,
-                  sel_shdi, ARRAY_SIZE(sel_shdi)),
-       DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1,
-                  sel_shdi, ARRAY_SIZE(sel_shdi)),
-       DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
-       DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
-       DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8,
-               CLK_DIVIDER_HIWORD_MASK),
+               /* Core output clk */
+               DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8,
+                       CLK_DIVIDER_HIWORD_MASK),
+               DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A,
+                       dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+               DEF_FIXED("P0_DIV2", R9A07G044_CLK_P0_DIV2, R9A07G044_CLK_P0, 1, 2),
+               DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1),
+               DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
+                       DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+               DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
+               DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
+                       DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+               DEF_FIXED("M0", R9A07G044_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
+               DEF_FIXED("ZT", R9A07G044_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
+               DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2,
+                       sel_pll6_2, ARRAY_SIZE(sel_pll6_2), 0, CLK_MUX_HIWORD_MASK),
+               DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
+               DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
+               DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0,
+                          sel_shdi, ARRAY_SIZE(sel_shdi)),
+               DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1,
+                          sel_shdi, ARRAY_SIZE(sel_shdi)),
+               DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
+               DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
+               DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8,
+                       CLK_DIVIDER_HIWORD_MASK),
+       },
+#ifdef CONFIG_CLK_R9A07G054
+       .drp = {
+       },
+#endif
 };
 
-static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
-       DEF_MOD("gic",          R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
-                               0x514, 0),
-       DEF_MOD("ia55_pclk",    R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
-                               0x518, 0),
-       DEF_MOD("ia55_clk",     R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
-                               0x518, 1),
-       DEF_MOD("dmac_aclk",    R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
-                               0x52c, 0),
-       DEF_MOD("dmac_pclk",    R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
-                               0x52c, 1),
-       DEF_MOD("ostm0_pclk",   R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
-                               0x534, 0),
-       DEF_MOD("ostm1_clk",    R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
-                               0x534, 1),
-       DEF_MOD("ostm2_pclk",   R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
-                               0x534, 2),
-       DEF_MOD("wdt0_pclk",    R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
-                               0x548, 0),
-       DEF_MOD("wdt0_clk",     R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
-                               0x548, 1),
-       DEF_MOD("wdt1_pclk",    R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
-                               0x548, 2),
-       DEF_MOD("wdt1_clk",     R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
-                               0x548, 3),
-       DEF_MOD("wdt2_pclk",    R9A07G044_WDT2_PCLK, R9A07G044_CLK_P0,
-                               0x548, 4),
-       DEF_MOD("wdt2_clk",     R9A07G044_WDT2_CLK, R9A07G044_OSCCLK,
-                               0x548, 5),
-       DEF_MOD("spi_clk2",     R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
-                               0x550, 0),
-       DEF_MOD("spi_clk",      R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
-                               0x550, 1),
-       DEF_MOD("sdhi0_imclk",  R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
-                               0x554, 0),
-       DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
-                               0x554, 1),
-       DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
-                               0x554, 2),
-       DEF_MOD("sdhi0_aclk",   R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
-                               0x554, 3),
-       DEF_MOD("sdhi1_imclk",  R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
-                               0x554, 4),
-       DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
-                               0x554, 5),
-       DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
-                               0x554, 6),
-       DEF_MOD("sdhi1_aclk",   R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
-                               0x554, 7),
-       DEF_MOD("gpu_clk",      R9A07G044_GPU_CLK, R9A07G044_CLK_G,
-                               0x558, 0),
-       DEF_MOD("gpu_axi_clk",  R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
-                               0x558, 1),
-       DEF_MOD("gpu_ace_clk",  R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
-                               0x558, 2),
-       DEF_MOD("ssi0_pclk",    R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
-                               0x570, 0),
-       DEF_MOD("ssi0_sfr",     R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
-                               0x570, 1),
-       DEF_MOD("ssi1_pclk",    R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
-                               0x570, 2),
-       DEF_MOD("ssi1_sfr",     R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
-                               0x570, 3),
-       DEF_MOD("ssi2_pclk",    R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
-                               0x570, 4),
-       DEF_MOD("ssi2_sfr",     R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
-                               0x570, 5),
-       DEF_MOD("ssi3_pclk",    R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
-                               0x570, 6),
-       DEF_MOD("ssi3_sfr",     R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
-                               0x570, 7),
-       DEF_MOD("usb0_host",    R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
-                               0x578, 0),
-       DEF_MOD("usb1_host",    R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
-                               0x578, 1),
-       DEF_MOD("usb0_func",    R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
-                               0x578, 2),
-       DEF_MOD("usb_pclk",     R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
-                               0x578, 3),
-       DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
-                               0x57c, 0),
-       DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
-                               0x57c, 0),
-       DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
-                               0x57c, 1),
-       DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
-                               0x57c, 1),
-       DEF_MOD("i2c0",         R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
-                               0x580, 0),
-       DEF_MOD("i2c1",         R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
-                               0x580, 1),
-       DEF_MOD("i2c2",         R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
-                               0x580, 2),
-       DEF_MOD("i2c3",         R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
-                               0x580, 3),
-       DEF_MOD("scif0",        R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
-                               0x584, 0),
-       DEF_MOD("scif1",        R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
-                               0x584, 1),
-       DEF_MOD("scif2",        R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
-                               0x584, 2),
-       DEF_MOD("scif3",        R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
-                               0x584, 3),
-       DEF_MOD("scif4",        R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
-                               0x584, 4),
-       DEF_MOD("sci0",         R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
-                               0x588, 0),
-       DEF_MOD("sci1",         R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
-                               0x588, 1),
-       DEF_MOD("rspi0",        R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
-                               0x590, 0),
-       DEF_MOD("rspi1",        R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
-                               0x590, 1),
-       DEF_MOD("rspi2",        R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
-                               0x590, 2),
-       DEF_MOD("canfd",        R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
-                               0x594, 0),
-       DEF_MOD("gpio",         R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
-                               0x598, 0),
-       DEF_MOD("adc_adclk",    R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
-                               0x5a8, 0),
-       DEF_MOD("adc_pclk",     R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
-                               0x5a8, 1),
-       DEF_MOD("tsu_pclk",     R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
-                               0x5ac, 0),
+static const struct {
+       struct rzg2l_mod_clk common[62];
+#ifdef CONFIG_CLK_R9A07G054
+       struct rzg2l_mod_clk drp[0];
+#endif
+} mod_clks = {
+       .common = {
+               DEF_MOD("gic",          R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
+                                       0x514, 0),
+               DEF_MOD("ia55_pclk",    R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
+                                       0x518, 0),
+               DEF_MOD("ia55_clk",     R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
+                                       0x518, 1),
+               DEF_MOD("dmac_aclk",    R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
+                                       0x52c, 0),
+               DEF_MOD("dmac_pclk",    R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
+                                       0x52c, 1),
+               DEF_MOD("ostm0_pclk",   R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
+                                       0x534, 0),
+               DEF_MOD("ostm1_clk",    R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
+                                       0x534, 1),
+               DEF_MOD("ostm2_pclk",   R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
+                                       0x534, 2),
+               DEF_MOD("wdt0_pclk",    R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
+                                       0x548, 0),
+               DEF_MOD("wdt0_clk",     R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
+                                       0x548, 1),
+               DEF_MOD("wdt1_pclk",    R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
+                                       0x548, 2),
+               DEF_MOD("wdt1_clk",     R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
+                                       0x548, 3),
+               DEF_MOD("wdt2_pclk",    R9A07G044_WDT2_PCLK, R9A07G044_CLK_P0,
+                                       0x548, 4),
+               DEF_MOD("wdt2_clk",     R9A07G044_WDT2_CLK, R9A07G044_OSCCLK,
+                                       0x548, 5),
+               DEF_MOD("spi_clk2",     R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
+                                       0x550, 0),
+               DEF_MOD("spi_clk",      R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
+                                       0x550, 1),
+               DEF_MOD("sdhi0_imclk",  R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
+                                       0x554, 0),
+               DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
+                                       0x554, 1),
+               DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
+                                       0x554, 2),
+               DEF_MOD("sdhi0_aclk",   R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
+                                       0x554, 3),
+               DEF_MOD("sdhi1_imclk",  R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
+                                       0x554, 4),
+               DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
+                                       0x554, 5),
+               DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
+                                       0x554, 6),
+               DEF_MOD("sdhi1_aclk",   R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
+                                       0x554, 7),
+               DEF_MOD("gpu_clk",      R9A07G044_GPU_CLK, R9A07G044_CLK_G,
+                                       0x558, 0),
+               DEF_MOD("gpu_axi_clk",  R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
+                                       0x558, 1),
+               DEF_MOD("gpu_ace_clk",  R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
+                                       0x558, 2),
+               DEF_MOD("ssi0_pclk",    R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
+                                       0x570, 0),
+               DEF_MOD("ssi0_sfr",     R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
+                                       0x570, 1),
+               DEF_MOD("ssi1_pclk",    R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
+                                       0x570, 2),
+               DEF_MOD("ssi1_sfr",     R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
+                                       0x570, 3),
+               DEF_MOD("ssi2_pclk",    R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
+                                       0x570, 4),
+               DEF_MOD("ssi2_sfr",     R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
+                                       0x570, 5),
+               DEF_MOD("ssi3_pclk",    R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
+                                       0x570, 6),
+               DEF_MOD("ssi3_sfr",     R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
+                                       0x570, 7),
+               DEF_MOD("usb0_host",    R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
+                                       0x578, 0),
+               DEF_MOD("usb1_host",    R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
+                                       0x578, 1),
+               DEF_MOD("usb0_func",    R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
+                                       0x578, 2),
+               DEF_MOD("usb_pclk",     R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
+                                       0x578, 3),
+               DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
+                                       0x57c, 0),
+               DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
+                                       0x57c, 0),
+               DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
+                                       0x57c, 1),
+               DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
+                                       0x57c, 1),
+               DEF_MOD("i2c0",         R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
+                                       0x580, 0),
+               DEF_MOD("i2c1",         R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
+                                       0x580, 1),
+               DEF_MOD("i2c2",         R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
+                                       0x580, 2),
+               DEF_MOD("i2c3",         R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
+                                       0x580, 3),
+               DEF_MOD("scif0",        R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
+                                       0x584, 0),
+               DEF_MOD("scif1",        R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
+                                       0x584, 1),
+               DEF_MOD("scif2",        R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
+                                       0x584, 2),
+               DEF_MOD("scif3",        R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
+                                       0x584, 3),
+               DEF_MOD("scif4",        R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
+                                       0x584, 4),
+               DEF_MOD("sci0",         R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
+                                       0x588, 0),
+               DEF_MOD("sci1",         R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
+                                       0x588, 1),
+               DEF_MOD("rspi0",        R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
+                                       0x590, 0),
+               DEF_MOD("rspi1",        R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
+                                       0x590, 1),
+               DEF_MOD("rspi2",        R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
+                                       0x590, 2),
+               DEF_MOD("canfd",        R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
+                                       0x594, 0),
+               DEF_MOD("gpio",         R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
+                                       0x598, 0),
+               DEF_MOD("adc_adclk",    R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
+                                       0x5a8, 0),
+               DEF_MOD("adc_pclk",     R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
+                                       0x5a8, 1),
+               DEF_MOD("tsu_pclk",     R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
+                                       0x5ac, 0),
+       },
+#ifdef CONFIG_CLK_R9A07G054
+       .drp = {
+       },
+#endif
 };
 
 static struct rzg2l_reset r9a07g044_resets[] = {
@@ -336,8 +359,8 @@ static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
 
 const struct rzg2l_cpg_info r9a07g044_cpg_info = {
        /* Core Clocks */
-       .core_clks = r9a07g044_core_clks,
-       .num_core_clks = ARRAY_SIZE(r9a07g044_core_clks),
+       .core_clks = core_clks.common,
+       .num_core_clks = ARRAY_SIZE(core_clks.common),
        .last_dt_core_clk = LAST_DT_CORE_CLK,
        .num_total_core_clks = MOD_CLK_BASE,
 
@@ -346,11 +369,34 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
        .num_crit_mod_clks = ARRAY_SIZE(r9a07g044_crit_mod_clks),
 
        /* Module Clocks */
-       .mod_clks = r9a07g044_mod_clks,
-       .num_mod_clks = ARRAY_SIZE(r9a07g044_mod_clks),
+       .mod_clks = mod_clks.common,
+       .num_mod_clks = ARRAY_SIZE(mod_clks.common),
        .num_hw_mod_clks = R9A07G044_TSU_PCLK + 1,
 
        /* Resets */
        .resets = r9a07g044_resets,
-       .num_resets = ARRAY_SIZE(r9a07g044_resets),
+       .num_resets = R9A07G044_TSU_PRESETN + 1, /* Last reset ID + 1 */
+};
+
+#ifdef CONFIG_CLK_R9A07G054
+const struct rzg2l_cpg_info r9a07g054_cpg_info = {
+       /* Core Clocks */
+       .core_clks = core_clks.common,
+       .num_core_clks = ARRAY_SIZE(core_clks.common) + ARRAY_SIZE(core_clks.drp),
+       .last_dt_core_clk = LAST_DT_CORE_CLK,
+       .num_total_core_clks = MOD_CLK_BASE,
+
+       /* Critical Module Clocks */
+       .crit_mod_clks = r9a07g044_crit_mod_clks,
+       .num_crit_mod_clks = ARRAY_SIZE(r9a07g044_crit_mod_clks),
+
+       /* Module Clocks */
+       .mod_clks = mod_clks.common,
+       .num_mod_clks = ARRAY_SIZE(mod_clks.common) + ARRAY_SIZE(mod_clks.drp),
+       .num_hw_mod_clks = R9A07G054_STPAI_ACLK_DRP + 1,
+
+       /* Resets */
+       .resets = r9a07g044_resets,
+       .num_resets = R9A07G054_STPAI_ARESETN + 1, /* Last reset ID + 1 */
 };
+#endif
index edd0abe..486d065 100644 (file)
@@ -952,6 +952,12 @@ static const struct of_device_id rzg2l_cpg_match[] = {
                .compatible = "renesas,r9a07g044-cpg",
                .data = &r9a07g044_cpg_info,
        },
+#endif
+#ifdef CONFIG_CLK_R9A07G054
+       {
+               .compatible = "renesas,r9a07g054-cpg",
+               .data = &r9a07g054_cpg_info,
+       },
 #endif
        { /* sentinel */ }
 };
index 5729d10..ce657be 100644 (file)
@@ -203,5 +203,6 @@ struct rzg2l_cpg_info {
 };
 
 extern const struct rzg2l_cpg_info r9a07g044_cpg_info;
+extern const struct rzg2l_cpg_info r9a07g054_cpg_info;
 
 #endif
index 69a9e80..606ae6c 100644 (file)
@@ -71,11 +71,17 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
        RK3036_PLL_RATE(500000000, 1, 125, 6, 1, 1, 0),
        RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
        RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+       RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0),
+       RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0),
        RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
        RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
        RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
+       RK3036_PLL_RATE(135000000, 2, 45, 4, 1, 1, 0),
+       RK3036_PLL_RATE(119000000, 3, 119, 4, 2, 1, 0),
+       RK3036_PLL_RATE(108000000, 2, 45, 5, 1, 1, 0),
        RK3036_PLL_RATE(100000000, 1, 150, 6, 6, 1, 0),
        RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
+       RK3036_PLL_RATE(78750000, 1, 96, 6, 4, 1, 0),
        RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
        { /* sentinel */ },
 };
@@ -1038,13 +1044,13 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
                        RK3568_CLKGATE_CON(20), 8, GFLAGS),
        GATE(HCLK_VOP, "hclk_vop", "hclk_vo", 0,
                        RK3568_CLKGATE_CON(20), 9, GFLAGS),
-       COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+       COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
                        RK3568_CLKSEL_CON(39), 10, 2, MFLAGS, 0, 8, DFLAGS,
                        RK3568_CLKGATE_CON(20), 10, GFLAGS),
-       COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+       COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
                        RK3568_CLKSEL_CON(40), 10, 2, MFLAGS, 0, 8, DFLAGS,
                        RK3568_CLKGATE_CON(20), 11, GFLAGS),
-       COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, 0,
+       COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
                        RK3568_CLKSEL_CON(41), 10, 2, MFLAGS, 0, 8, DFLAGS,
                        RK3568_CLKGATE_CON(20), 12, GFLAGS),
        GATE(CLK_VOP_PWM, "clk_vop_pwm", "xin24m", 0,
@@ -1562,7 +1568,7 @@ static struct rockchip_clk_branch rk3568_clk_pmu_branches[] __initdata = {
                        RK3568_PMU_CLKGATE_CON(2), 14, GFLAGS),
        GATE(XIN_OSC0_EDPPHY_G, "xin_osc0_edpphy_g", "xin24m", 0,
                        RK3568_PMU_CLKGATE_CON(2), 15, GFLAGS),
-       MUX(CLK_HDMI_REF, "clk_hdmi_ref", clk_hdmi_ref_p, 0,
+       MUX(CLK_HDMI_REF, "clk_hdmi_ref", clk_hdmi_ref_p, CLK_SET_RATE_PARENT,
                        RK3568_PMU_CLKSEL_CON(8), 7, 1, MFLAGS),
 };
 
@@ -1697,14 +1703,12 @@ static const struct of_device_id clk_rk3568_match_table[] = {
 static int __init clk_rk3568_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       const struct of_device_id *match;
        const struct clk_rk3568_inits *init_data;
 
-       match = of_match_device(clk_rk3568_match_table, &pdev->dev);
-       if (!match || !match->data)
+       init_data = (struct clk_rk3568_inits *)of_device_get_match_data(&pdev->dev);
+       if (!init_data)
                return -EINVAL;
 
-       init_data = match->data;
        if (init_data->inits)
                init_data->inits(np);
 
index b7be7e1..bb8a844 100644 (file)
@@ -180,6 +180,7 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
                unsigned long rate, unsigned long *parent_rate,
                unsigned long *m, unsigned long *n)
 {
+       struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long p_rate, p_parent_rate;
        struct clk_hw *p_parent;
 
@@ -190,6 +191,8 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
                *parent_rate = p_parent_rate;
        }
 
+       fd->flags |= CLK_FRAC_DIVIDER_POWER_OF_TWO_PS;
+
        clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
 }
 
index 7b06fc0..efdf01f 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CLK_SIFIVE_PRCI)  += sifive-prci.o fu540-prci.o fu740-prci.o
+obj-$(CONFIG_CLK_SIFIVE_PRCI)  += sifive-prci.o
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
deleted file mode 100644 (file)
index 29bab91..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018-2019 SiFive, Inc.
- * Copyright (C) 2018-2019 Wesley Terpstra
- * Copyright (C) 2018-2019 Paul Walmsley
- * Copyright (C) 2020 Zong Li
- *
- * The FU540 PRCI implements clock and reset control for the SiFive
- * FU540-C000 chip.  This driver assumes that it has sole control
- * over all PRCI resources.
- *
- * This driver is based on the PRCI driver written by Wesley Terpstra:
- * https://github.com/riscv/riscv-linux/commit/999529edf517ed75b56659d456d221b2ee56bb60
- *
- * References:
- * - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
- */
-
-#include <linux/module.h>
-
-#include <dt-bindings/clock/sifive-fu540-prci.h>
-
-#include "fu540-prci.h"
-#include "sifive-prci.h"
-
-/* PRCI integration data for each WRPLL instance */
-
-static struct __prci_wrpll_data __prci_corepll_data = {
-       .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
-       .enable_bypass = sifive_prci_coreclksel_use_hfclk,
-       .disable_bypass = sifive_prci_coreclksel_use_corepll,
-};
-
-static struct __prci_wrpll_data __prci_ddrpll_data = {
-       .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
-};
-
-static struct __prci_wrpll_data __prci_gemgxlpll_data = {
-       .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
-};
-
-/* Linux clock framework integration */
-
-static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
-       .set_rate = sifive_prci_wrpll_set_rate,
-       .round_rate = sifive_prci_wrpll_round_rate,
-       .recalc_rate = sifive_prci_wrpll_recalc_rate,
-       .enable = sifive_prci_clock_enable,
-       .disable = sifive_prci_clock_disable,
-       .is_enabled = sifive_clk_is_enabled,
-};
-
-static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
-       .recalc_rate = sifive_prci_wrpll_recalc_rate,
-};
-
-static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
-       .recalc_rate = sifive_prci_tlclksel_recalc_rate,
-};
-
-/* List of clock controls provided by the PRCI */
-struct __prci_clock __prci_init_clocks_fu540[] = {
-       [PRCI_CLK_COREPLL] = {
-               .name = "corepll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu540_prci_wrpll_clk_ops,
-               .pwd = &__prci_corepll_data,
-       },
-       [PRCI_CLK_DDRPLL] = {
-               .name = "ddrpll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu540_prci_wrpll_ro_clk_ops,
-               .pwd = &__prci_ddrpll_data,
-       },
-       [PRCI_CLK_GEMGXLPLL] = {
-               .name = "gemgxlpll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu540_prci_wrpll_clk_ops,
-               .pwd = &__prci_gemgxlpll_data,
-       },
-       [PRCI_CLK_TLCLK] = {
-               .name = "tlclk",
-               .parent_name = "corepll",
-               .ops = &sifive_fu540_prci_tlclksel_clk_ops,
-       },
-};
index c220677..e017332 100644 (file)
@@ -1,16 +1,99 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2020 SiFive, Inc.
- * Zong Li
+ * Copyright (C) 2018-2021 SiFive, Inc.
+ * Copyright (C) 2018-2019 Wesley Terpstra
+ * Copyright (C) 2018-2019 Paul Walmsley
+ * Copyright (C) 2020-2021 Zong Li
+ *
+ * The FU540 PRCI implements clock and reset control for the SiFive
+ * FU540-C000 chip.  This driver assumes that it has sole control
+ * over all PRCI resources.
+ *
+ * This driver is based on the PRCI driver written by Wesley Terpstra:
+ * https://github.com/riscv/riscv-linux/commit/999529edf517ed75b56659d456d221b2ee56bb60
+ *
+ * References:
+ * - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
  */
 
 #ifndef __SIFIVE_CLK_FU540_PRCI_H
 #define __SIFIVE_CLK_FU540_PRCI_H
 
+
+#include <linux/module.h>
+
+#include <dt-bindings/clock/sifive-fu540-prci.h>
+
 #include "sifive-prci.h"
 
-#define NUM_CLOCK_FU540        4
+/* PRCI integration data for each WRPLL instance */
+
+static struct __prci_wrpll_data sifive_fu540_prci_corepll_data = {
+       .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
+       .enable_bypass = sifive_prci_coreclksel_use_hfclk,
+       .disable_bypass = sifive_prci_coreclksel_use_corepll,
+};
+
+static struct __prci_wrpll_data sifive_fu540_prci_ddrpll_data = {
+       .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
+};
+
+static struct __prci_wrpll_data sifive_fu540_prci_gemgxlpll_data = {
+       .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
+};
+
+/* Linux clock framework integration */
+
+static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
+       .set_rate = sifive_prci_wrpll_set_rate,
+       .round_rate = sifive_prci_wrpll_round_rate,
+       .recalc_rate = sifive_prci_wrpll_recalc_rate,
+       .enable = sifive_prci_clock_enable,
+       .disable = sifive_prci_clock_disable,
+       .is_enabled = sifive_clk_is_enabled,
+};
+
+static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
+       .recalc_rate = sifive_prci_wrpll_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
+       .recalc_rate = sifive_prci_tlclksel_recalc_rate,
+};
+
+/* List of clock controls provided by the PRCI */
+static struct __prci_clock __prci_init_clocks_fu540[] = {
+       [FU540_PRCI_CLK_COREPLL] = {
+               .name = "corepll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu540_prci_wrpll_clk_ops,
+               .pwd = &sifive_fu540_prci_corepll_data,
+       },
+       [FU540_PRCI_CLK_DDRPLL] = {
+               .name = "ddrpll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu540_prci_wrpll_ro_clk_ops,
+               .pwd = &sifive_fu540_prci_ddrpll_data,
+       },
+       [FU540_PRCI_CLK_GEMGXLPLL] = {
+               .name = "gemgxlpll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu540_prci_wrpll_clk_ops,
+               .pwd = &sifive_fu540_prci_gemgxlpll_data,
+       },
+       [FU540_PRCI_CLK_TLCLK] = {
+               .name = "tlclk",
+               .parent_name = "corepll",
+               .ops = &sifive_fu540_prci_tlclksel_clk_ops,
+       },
+};
 
-extern struct __prci_clock __prci_init_clocks_fu540[NUM_CLOCK_FU540];
+static const struct prci_clk_desc prci_clk_fu540 = {
+       .clks = __prci_init_clocks_fu540,
+       .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
+};
 
 #endif /* __SIFIVE_CLK_FU540_PRCI_H */
diff --git a/drivers/clk/sifive/fu740-prci.c b/drivers/clk/sifive/fu740-prci.c
deleted file mode 100644 (file)
index 53f6e00..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2020 SiFive, Inc.
- * Copyright (C) 2020 Zong Li
- */
-
-#include <linux/module.h>
-
-#include <dt-bindings/clock/sifive-fu740-prci.h>
-
-#include "fu540-prci.h"
-#include "sifive-prci.h"
-
-/* PRCI integration data for each WRPLL instance */
-
-static struct __prci_wrpll_data __prci_corepll_data = {
-       .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
-       .enable_bypass = sifive_prci_coreclksel_use_hfclk,
-       .disable_bypass = sifive_prci_coreclksel_use_final_corepll,
-};
-
-static struct __prci_wrpll_data __prci_ddrpll_data = {
-       .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
-};
-
-static struct __prci_wrpll_data __prci_gemgxlpll_data = {
-       .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
-};
-
-static struct __prci_wrpll_data __prci_dvfscorepll_data = {
-       .cfg0_offs = PRCI_DVFSCOREPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_DVFSCOREPLLCFG1_OFFSET,
-       .enable_bypass = sifive_prci_corepllsel_use_corepll,
-       .disable_bypass = sifive_prci_corepllsel_use_dvfscorepll,
-};
-
-static struct __prci_wrpll_data __prci_hfpclkpll_data = {
-       .cfg0_offs = PRCI_HFPCLKPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_HFPCLKPLLCFG1_OFFSET,
-       .enable_bypass = sifive_prci_hfpclkpllsel_use_hfclk,
-       .disable_bypass = sifive_prci_hfpclkpllsel_use_hfpclkpll,
-};
-
-static struct __prci_wrpll_data __prci_cltxpll_data = {
-       .cfg0_offs = PRCI_CLTXPLLCFG0_OFFSET,
-       .cfg1_offs = PRCI_CLTXPLLCFG1_OFFSET,
-};
-
-/* Linux clock framework integration */
-
-static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
-       .set_rate = sifive_prci_wrpll_set_rate,
-       .round_rate = sifive_prci_wrpll_round_rate,
-       .recalc_rate = sifive_prci_wrpll_recalc_rate,
-       .enable = sifive_prci_clock_enable,
-       .disable = sifive_prci_clock_disable,
-       .is_enabled = sifive_clk_is_enabled,
-};
-
-static const struct clk_ops sifive_fu740_prci_wrpll_ro_clk_ops = {
-       .recalc_rate = sifive_prci_wrpll_recalc_rate,
-};
-
-static const struct clk_ops sifive_fu740_prci_tlclksel_clk_ops = {
-       .recalc_rate = sifive_prci_tlclksel_recalc_rate,
-};
-
-static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
-       .recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
-};
-
-static const struct clk_ops sifive_fu740_prci_pcie_aux_clk_ops = {
-       .enable = sifive_prci_pcie_aux_clock_enable,
-       .disable = sifive_prci_pcie_aux_clock_disable,
-       .is_enabled = sifive_prci_pcie_aux_clock_is_enabled,
-};
-
-/* List of clock controls provided by the PRCI */
-struct __prci_clock __prci_init_clocks_fu740[] = {
-       [PRCI_CLK_COREPLL] = {
-               .name = "corepll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu740_prci_wrpll_clk_ops,
-               .pwd = &__prci_corepll_data,
-       },
-       [PRCI_CLK_DDRPLL] = {
-               .name = "ddrpll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu740_prci_wrpll_ro_clk_ops,
-               .pwd = &__prci_ddrpll_data,
-       },
-       [PRCI_CLK_GEMGXLPLL] = {
-               .name = "gemgxlpll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu740_prci_wrpll_clk_ops,
-               .pwd = &__prci_gemgxlpll_data,
-       },
-       [PRCI_CLK_DVFSCOREPLL] = {
-               .name = "dvfscorepll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu740_prci_wrpll_clk_ops,
-               .pwd = &__prci_dvfscorepll_data,
-       },
-       [PRCI_CLK_HFPCLKPLL] = {
-               .name = "hfpclkpll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu740_prci_wrpll_clk_ops,
-               .pwd = &__prci_hfpclkpll_data,
-       },
-       [PRCI_CLK_CLTXPLL] = {
-               .name = "cltxpll",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu740_prci_wrpll_clk_ops,
-               .pwd = &__prci_cltxpll_data,
-       },
-       [PRCI_CLK_TLCLK] = {
-               .name = "tlclk",
-               .parent_name = "corepll",
-               .ops = &sifive_fu740_prci_tlclksel_clk_ops,
-       },
-       [PRCI_CLK_PCLK] = {
-               .name = "pclk",
-               .parent_name = "hfpclkpll",
-               .ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
-       },
-       [PRCI_CLK_PCIE_AUX] = {
-               .name = "pcie_aux",
-               .parent_name = "hfclk",
-               .ops = &sifive_fu740_prci_pcie_aux_clk_ops,
-       },
-};
index 511a0bf..f31cd30 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2020 SiFive, Inc.
- * Zong Li
+ * Copyright (C) 2020-2021 SiFive, Inc.
+ * Copyright (C) 2020-2021 Zong Li
  */
 
 #ifndef __SIFIVE_CLK_FU740_PRCI_H
 #define __SIFIVE_CLK_FU740_PRCI_H
 
+#include <linux/module.h>
+
+#include <dt-bindings/clock/sifive-fu740-prci.h>
+
 #include "sifive-prci.h"
 
-#define NUM_CLOCK_FU740        9
+/* PRCI integration data for each WRPLL instance */
+
+static struct __prci_wrpll_data sifive_fu740_prci_corepll_data = {
+       .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
+       .enable_bypass = sifive_prci_coreclksel_use_hfclk,
+       .disable_bypass = sifive_prci_coreclksel_use_final_corepll,
+};
+
+static struct __prci_wrpll_data sifive_fu740_prci_ddrpll_data = {
+       .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
+};
+
+static struct __prci_wrpll_data sifive_fu740_prci_gemgxlpll_data = {
+       .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
+};
+
+static struct __prci_wrpll_data sifive_fu740_prci_dvfscorepll_data = {
+       .cfg0_offs = PRCI_DVFSCOREPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_DVFSCOREPLLCFG1_OFFSET,
+       .enable_bypass = sifive_prci_corepllsel_use_corepll,
+       .disable_bypass = sifive_prci_corepllsel_use_dvfscorepll,
+};
+
+static struct __prci_wrpll_data sifive_fu740_prci_hfpclkpll_data = {
+       .cfg0_offs = PRCI_HFPCLKPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_HFPCLKPLLCFG1_OFFSET,
+       .enable_bypass = sifive_prci_hfpclkpllsel_use_hfclk,
+       .disable_bypass = sifive_prci_hfpclkpllsel_use_hfpclkpll,
+};
+
+static struct __prci_wrpll_data sifive_fu740_prci_cltxpll_data = {
+       .cfg0_offs = PRCI_CLTXPLLCFG0_OFFSET,
+       .cfg1_offs = PRCI_CLTXPLLCFG1_OFFSET,
+};
+
+/* Linux clock framework integration */
+
+static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
+       .set_rate = sifive_prci_wrpll_set_rate,
+       .round_rate = sifive_prci_wrpll_round_rate,
+       .recalc_rate = sifive_prci_wrpll_recalc_rate,
+       .enable = sifive_prci_clock_enable,
+       .disable = sifive_prci_clock_disable,
+       .is_enabled = sifive_clk_is_enabled,
+};
 
-extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740];
+static const struct clk_ops sifive_fu740_prci_wrpll_ro_clk_ops = {
+       .recalc_rate = sifive_prci_wrpll_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu740_prci_tlclksel_clk_ops = {
+       .recalc_rate = sifive_prci_tlclksel_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
+       .recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu740_prci_pcie_aux_clk_ops = {
+       .enable = sifive_prci_pcie_aux_clock_enable,
+       .disable = sifive_prci_pcie_aux_clock_disable,
+       .is_enabled = sifive_prci_pcie_aux_clock_is_enabled,
+};
+
+/* List of clock controls provided by the PRCI */
+static struct __prci_clock __prci_init_clocks_fu740[] = {
+       [FU740_PRCI_CLK_COREPLL] = {
+               .name = "corepll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu740_prci_wrpll_clk_ops,
+               .pwd = &sifive_fu740_prci_corepll_data,
+       },
+       [FU740_PRCI_CLK_DDRPLL] = {
+               .name = "ddrpll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu740_prci_wrpll_ro_clk_ops,
+               .pwd = &sifive_fu740_prci_ddrpll_data,
+       },
+       [FU740_PRCI_CLK_GEMGXLPLL] = {
+               .name = "gemgxlpll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu740_prci_wrpll_clk_ops,
+               .pwd = &sifive_fu740_prci_gemgxlpll_data,
+       },
+       [FU740_PRCI_CLK_DVFSCOREPLL] = {
+               .name = "dvfscorepll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu740_prci_wrpll_clk_ops,
+               .pwd = &sifive_fu740_prci_dvfscorepll_data,
+       },
+       [FU740_PRCI_CLK_HFPCLKPLL] = {
+               .name = "hfpclkpll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu740_prci_wrpll_clk_ops,
+               .pwd = &sifive_fu740_prci_hfpclkpll_data,
+       },
+       [FU740_PRCI_CLK_CLTXPLL] = {
+               .name = "cltxpll",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu740_prci_wrpll_clk_ops,
+               .pwd = &sifive_fu740_prci_cltxpll_data,
+       },
+       [FU740_PRCI_CLK_TLCLK] = {
+               .name = "tlclk",
+               .parent_name = "corepll",
+               .ops = &sifive_fu740_prci_tlclksel_clk_ops,
+       },
+       [FU740_PRCI_CLK_PCLK] = {
+               .name = "pclk",
+               .parent_name = "hfpclkpll",
+               .ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
+       },
+       [FU740_PRCI_CLK_PCIE_AUX] = {
+               .name = "pcie_aux",
+               .parent_name = "hfclk",
+               .ops = &sifive_fu740_prci_pcie_aux_clk_ops,
+       },
+};
 
 static const struct prci_clk_desc prci_clk_fu740 = {
        .clks = __prci_init_clocks_fu740,
index 80a288c..916d2fc 100644 (file)
 #include "fu540-prci.h"
 #include "fu740-prci.h"
 
-static const struct prci_clk_desc prci_clk_fu540 = {
-       .clks = __prci_init_clocks_fu540,
-       .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
-};
-
 /*
  * Private functions
  */
index 3256779..3930d92 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier:    GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017, Intel Corporation
  */
index cbabde2..f5c1ca4 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier:    GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017, Intel Corporation
  */
index e444e4a..1d82737 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier:    GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017, Intel Corporation
  */
index 4e508a8..9b2e027 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier:    GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017, Intel Corporation
  */
index c0fa9d5..003bd2d 100644 (file)
@@ -7,3 +7,11 @@ config CLK_STARFIVE_JH7100
        help
          Say yes here to support the clock controller on the StarFive JH7100
          SoC.
+
+config CLK_STARFIVE_JH7100_AUDIO
+       tristate "StarFive JH7100 audio clock support"
+       depends on CLK_STARFIVE_JH7100
+       default m if SOC_STARFIVE
+       help
+         Say Y or M here to support the audio clocks on the StarFive JH7100
+         SoC.
index 09759cc..0fa8ecb 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 # StarFive Clock
 obj-$(CONFIG_CLK_STARFIVE_JH7100)      += clk-starfive-jh7100.o
+obj-$(CONFIG_CLK_STARFIVE_JH7100_AUDIO)        += clk-starfive-jh7100-audio.o
diff --git a/drivers/clk/starfive/clk-starfive-jh7100-audio.c b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
new file mode 100644 (file)
index 0000000..8473a65
--- /dev/null
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH7100 Audio Clock Driver
+ *
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/starfive-jh7100-audio.h>
+
+#include "clk-starfive-jh7100.h"
+
+/* external clocks */
+#define JH7100_AUDCLK_AUDIO_SRC                        (JH7100_AUDCLK_END + 0)
+#define JH7100_AUDCLK_AUDIO_12288              (JH7100_AUDCLK_END + 1)
+#define JH7100_AUDCLK_DOM7AHB_BUS              (JH7100_AUDCLK_END + 2)
+#define JH7100_AUDCLK_I2SADC_BCLK_IOPAD                (JH7100_AUDCLK_END + 3)
+#define JH7100_AUDCLK_I2SADC_LRCLK_IOPAD       (JH7100_AUDCLK_END + 4)
+#define JH7100_AUDCLK_I2SDAC_BCLK_IOPAD                (JH7100_AUDCLK_END + 5)
+#define JH7100_AUDCLK_I2SDAC_LRCLK_IOPAD       (JH7100_AUDCLK_END + 6)
+#define JH7100_AUDCLK_VAD_INTMEM                (JH7100_AUDCLK_END + 7)
+
+static const struct jh7100_clk_data jh7100_audclk_data[] = {
+       JH7100__GMD(JH7100_AUDCLK_ADC_MCLK, "adc_mclk", 0, 15, 2,
+                   JH7100_AUDCLK_AUDIO_SRC,
+                   JH7100_AUDCLK_AUDIO_12288),
+       JH7100__GMD(JH7100_AUDCLK_I2S1_MCLK, "i2s1_mclk", 0, 15, 2,
+                   JH7100_AUDCLK_AUDIO_SRC,
+                   JH7100_AUDCLK_AUDIO_12288),
+       JH7100_GATE(JH7100_AUDCLK_I2SADC_APB, "i2sadc_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100_MDIV(JH7100_AUDCLK_I2SADC_BCLK, "i2sadc_bclk", 31, 2,
+                   JH7100_AUDCLK_ADC_MCLK,
+                   JH7100_AUDCLK_I2SADC_BCLK_IOPAD),
+       JH7100__INV(JH7100_AUDCLK_I2SADC_BCLK_N, "i2sadc_bclk_n", JH7100_AUDCLK_I2SADC_BCLK),
+       JH7100_MDIV(JH7100_AUDCLK_I2SADC_LRCLK, "i2sadc_lrclk", 63, 3,
+                   JH7100_AUDCLK_I2SADC_BCLK_N,
+                   JH7100_AUDCLK_I2SADC_LRCLK_IOPAD,
+                   JH7100_AUDCLK_I2SADC_BCLK),
+       JH7100_GATE(JH7100_AUDCLK_PDM_APB, "pdm_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100__GMD(JH7100_AUDCLK_PDM_MCLK, "pdm_mclk", 0, 15, 2,
+                   JH7100_AUDCLK_AUDIO_SRC,
+                   JH7100_AUDCLK_AUDIO_12288),
+       JH7100_GATE(JH7100_AUDCLK_I2SVAD_APB, "i2svad_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100__GMD(JH7100_AUDCLK_SPDIF, "spdif", 0, 15, 2,
+                   JH7100_AUDCLK_AUDIO_SRC,
+                   JH7100_AUDCLK_AUDIO_12288),
+       JH7100_GATE(JH7100_AUDCLK_SPDIF_APB, "spdif_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100_GATE(JH7100_AUDCLK_PWMDAC_APB, "pwmdac_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100__GMD(JH7100_AUDCLK_DAC_MCLK, "dac_mclk", 0, 15, 2,
+                   JH7100_AUDCLK_AUDIO_SRC,
+                   JH7100_AUDCLK_AUDIO_12288),
+       JH7100_GATE(JH7100_AUDCLK_I2SDAC_APB, "i2sdac_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100_MDIV(JH7100_AUDCLK_I2SDAC_BCLK, "i2sdac_bclk", 31, 2,
+                   JH7100_AUDCLK_DAC_MCLK,
+                   JH7100_AUDCLK_I2SDAC_BCLK_IOPAD),
+       JH7100__INV(JH7100_AUDCLK_I2SDAC_BCLK_N, "i2sdac_bclk_n", JH7100_AUDCLK_I2SDAC_BCLK),
+       JH7100_MDIV(JH7100_AUDCLK_I2SDAC_LRCLK, "i2sdac_lrclk", 31, 2,
+                   JH7100_AUDCLK_I2S1_MCLK,
+                   JH7100_AUDCLK_I2SDAC_BCLK_IOPAD),
+       JH7100_GATE(JH7100_AUDCLK_I2S1_APB, "i2s1_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100_MDIV(JH7100_AUDCLK_I2S1_BCLK, "i2s1_bclk", 31, 2,
+                   JH7100_AUDCLK_I2S1_MCLK,
+                   JH7100_AUDCLK_I2SDAC_BCLK_IOPAD),
+       JH7100__INV(JH7100_AUDCLK_I2S1_BCLK_N, "i2s1_bclk_n", JH7100_AUDCLK_I2S1_BCLK),
+       JH7100_MDIV(JH7100_AUDCLK_I2S1_LRCLK, "i2s1_lrclk", 63, 3,
+                   JH7100_AUDCLK_I2S1_BCLK_N,
+                   JH7100_AUDCLK_I2SDAC_LRCLK_IOPAD),
+       JH7100_GATE(JH7100_AUDCLK_I2SDAC16K_APB, "i2s1dac16k_apb", 0, JH7100_AUDCLK_APB0_BUS),
+       JH7100__DIV(JH7100_AUDCLK_APB0_BUS, "apb0_bus", 8, JH7100_AUDCLK_DOM7AHB_BUS),
+       JH7100_GATE(JH7100_AUDCLK_DMA1P_AHB, "dma1p_ahb", 0, JH7100_AUDCLK_DOM7AHB_BUS),
+       JH7100_GATE(JH7100_AUDCLK_USB_APB, "usb_apb", CLK_IGNORE_UNUSED, JH7100_AUDCLK_APB_EN),
+       JH7100_GDIV(JH7100_AUDCLK_USB_LPM, "usb_lpm", CLK_IGNORE_UNUSED, 4, JH7100_AUDCLK_USB_APB),
+       JH7100_GDIV(JH7100_AUDCLK_USB_STB, "usb_stb", CLK_IGNORE_UNUSED, 3, JH7100_AUDCLK_USB_APB),
+       JH7100__DIV(JH7100_AUDCLK_APB_EN, "apb_en", 8, JH7100_AUDCLK_DOM7AHB_BUS),
+       JH7100__MUX(JH7100_AUDCLK_VAD_MEM, "vad_mem", 2,
+                   JH7100_AUDCLK_VAD_INTMEM,
+                   JH7100_AUDCLK_AUDIO_12288),
+};
+
+static struct clk_hw *jh7100_audclk_get(struct of_phandle_args *clkspec, void *data)
+{
+       struct jh7100_clk_priv *priv = data;
+       unsigned int idx = clkspec->args[0];
+
+       if (idx < JH7100_AUDCLK_END)
+               return &priv->reg[idx].hw;
+
+       return ERR_PTR(-EINVAL);
+}
+
+static int jh7100_audclk_probe(struct platform_device *pdev)
+{
+       struct jh7100_clk_priv *priv;
+       unsigned int idx;
+       int ret;
+
+       priv = devm_kzalloc(&pdev->dev, struct_size(priv, reg, JH7100_AUDCLK_END), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       spin_lock_init(&priv->rmw_lock);
+       priv->dev = &pdev->dev;
+       priv->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       for (idx = 0; idx < JH7100_AUDCLK_END; idx++) {
+               u32 max = jh7100_audclk_data[idx].max;
+               struct clk_parent_data parents[4] = {};
+               struct clk_init_data init = {
+                       .name = jh7100_audclk_data[idx].name,
+                       .ops = starfive_jh7100_clk_ops(max),
+                       .parent_data = parents,
+                       .num_parents = ((max & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT) + 1,
+                       .flags = jh7100_audclk_data[idx].flags,
+               };
+               struct jh7100_clk *clk = &priv->reg[idx];
+               unsigned int i;
+
+               for (i = 0; i < init.num_parents; i++) {
+                       unsigned int pidx = jh7100_audclk_data[idx].parents[i];
+
+                       if (pidx < JH7100_AUDCLK_END)
+                               parents[i].hw = &priv->reg[pidx].hw;
+                       else if (pidx == JH7100_AUDCLK_AUDIO_SRC)
+                               parents[i].fw_name = "audio_src";
+                       else if (pidx == JH7100_AUDCLK_AUDIO_12288)
+                               parents[i].fw_name = "audio_12288";
+                       else if (pidx == JH7100_AUDCLK_DOM7AHB_BUS)
+                               parents[i].fw_name = "dom7ahb_bus";
+               }
+
+               clk->hw.init = &init;
+               clk->idx = idx;
+               clk->max_div = max & JH7100_CLK_DIV_MASK;
+
+               ret = devm_clk_hw_register(priv->dev, &clk->hw);
+               if (ret)
+                       return ret;
+       }
+
+       return devm_of_clk_add_hw_provider(priv->dev, jh7100_audclk_get, priv);
+}
+
+static const struct of_device_id jh7100_audclk_match[] = {
+       { .compatible = "starfive,jh7100-audclk" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jh7100_audclk_match);
+
+static struct platform_driver jh7100_audclk_driver = {
+       .probe = jh7100_audclk_probe,
+       .driver = {
+               .name = "clk-starfive-jh7100-audio",
+               .of_match_table = jh7100_audclk_match,
+       },
+};
+module_platform_driver(jh7100_audclk_driver);
+
+MODULE_AUTHOR("Emil Renner Berthing");
+MODULE_DESCRIPTION("StarFive JH7100 audio clock driver");
+MODULE_LICENSE("GPL v2");
index 25d31af..691aeeb 100644 (file)
 
 #include <dt-bindings/clock/starfive-jh7100.h>
 
+#include "clk-starfive-jh7100.h"
+
 /* external clocks */
 #define JH7100_CLK_OSC_SYS             (JH7100_CLK_END + 0)
 #define JH7100_CLK_OSC_AUD             (JH7100_CLK_END + 1)
 #define JH7100_CLK_GMAC_RMII_REF       (JH7100_CLK_END + 2)
 #define JH7100_CLK_GMAC_GR_MII_RX      (JH7100_CLK_END + 3)
 
-/* register fields */
-#define JH7100_CLK_ENABLE      BIT(31)
-#define JH7100_CLK_INVERT      BIT(30)
-#define JH7100_CLK_MUX_MASK    GENMASK(27, 24)
-#define JH7100_CLK_MUX_SHIFT   24
-#define JH7100_CLK_DIV_MASK    GENMASK(23, 0)
-
-/* clock data */
-#define JH7100_GATE(_idx, _name, _flags, _parent) [_idx] = {           \
-       .name = _name,                                                  \
-       .flags = CLK_SET_RATE_PARENT | (_flags),                        \
-       .max = JH7100_CLK_ENABLE,                                       \
-       .parents = { [0] = _parent },                                   \
-}
-
-#define JH7100__DIV(_idx, _name, _max, _parent) [_idx] = {             \
-       .name = _name,                                                  \
-       .flags = 0,                                                     \
-       .max = _max,                                                    \
-       .parents = { [0] = _parent },                                   \
-}
-
-#define JH7100_GDIV(_idx, _name, _flags, _max, _parent) [_idx] = {     \
-       .name = _name,                                                  \
-       .flags = _flags,                                                \
-       .max = JH7100_CLK_ENABLE | (_max),                              \
-       .parents = { [0] = _parent },                                   \
-}
-
-#define JH7100__MUX(_idx, _name, _nparents, ...) [_idx] = {            \
-       .name = _name,                                                  \
-       .flags = 0,                                                     \
-       .max = ((_nparents) - 1) << JH7100_CLK_MUX_SHIFT,               \
-       .parents = { __VA_ARGS__ },                                     \
-}
-
-#define JH7100_GMUX(_idx, _name, _flags, _nparents, ...) [_idx] = {    \
-       .name = _name,                                                  \
-       .flags = _flags,                                                \
-       .max = JH7100_CLK_ENABLE |                                      \
-               (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT),            \
-       .parents = { __VA_ARGS__ },                                     \
-}
-
-#define JH7100__INV(_idx, _name, _parent) [_idx] = {                   \
-       .name = _name,                                                  \
-       .flags = CLK_SET_RATE_PARENT,                                   \
-       .max = JH7100_CLK_INVERT,                                       \
-       .parents = { [0] = _parent },                                   \
-}
-
-static const struct {
-       const char *name;
-       unsigned long flags;
-       u32 max;
-       u8 parents[4];
-} jh7100_clk_data[] __initconst = {
+static const struct jh7100_clk_data jh7100_clk_data[] __initconst = {
        JH7100__MUX(JH7100_CLK_CPUNDBUS_ROOT, "cpundbus_root", 4,
                    JH7100_CLK_OSC_SYS,
                    JH7100_CLK_PLL0_OUT,
@@ -225,7 +171,7 @@ static const struct {
        JH7100__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 2,
                    JH7100_CLK_OSC_SYS,
                    JH7100_CLK_USBPHY_PLLDIV25M),
-       JH7100__DIV(JH7100_CLK_AUDIO_DIV, "audio_div", 131072, JH7100_CLK_AUDIO_ROOT),
+       JH7100_FDIV(JH7100_CLK_AUDIO_DIV, "audio_div", JH7100_CLK_AUDIO_ROOT),
        JH7100_GATE(JH7100_CLK_AUDIO_SRC, "audio_src", 0, JH7100_CLK_AUDIO_DIV),
        JH7100_GATE(JH7100_CLK_AUDIO_12288, "audio_12288", 0, JH7100_CLK_OSC_AUD),
        JH7100_GDIV(JH7100_CLK_VIN_SRC, "vin_src", 0, 4, JH7100_CLK_VIN_ROOT),
@@ -323,21 +269,6 @@ static const struct {
        JH7100_GATE(JH7100_CLK_SYSERR_APB, "syserr_apb", 0, JH7100_CLK_APB2_BUS),
 };
 
-struct jh7100_clk {
-       struct clk_hw hw;
-       unsigned int idx;
-       unsigned int max_div;
-};
-
-struct jh7100_clk_priv {
-       /* protect clk enable and set rate/parent from happening at the same time */
-       spinlock_t rmw_lock;
-       struct device *dev;
-       void __iomem *base;
-       struct clk_hw *pll[3];
-       struct jh7100_clk reg[JH7100_CLK_PLL0_OUT];
-};
-
 static struct jh7100_clk *jh7100_clk_from(struct clk_hw *hw)
 {
        return container_of(hw, struct jh7100_clk, hw);
@@ -399,22 +330,13 @@ static unsigned long jh7100_clk_recalc_rate(struct clk_hw *hw,
        return div ? parent_rate / div : 0;
 }
 
-static unsigned long jh7100_clk_bestdiv(struct jh7100_clk *clk,
-                                       unsigned long rate, unsigned long parent)
-{
-       unsigned long max = clk->max_div;
-       unsigned long div = DIV_ROUND_UP(parent, rate);
-
-       return min(div, max);
-}
-
 static int jh7100_clk_determine_rate(struct clk_hw *hw,
                                     struct clk_rate_request *req)
 {
        struct jh7100_clk *clk = jh7100_clk_from(hw);
        unsigned long parent = req->best_parent_rate;
        unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
-       unsigned long div = jh7100_clk_bestdiv(clk, rate, parent);
+       unsigned long div = min_t(unsigned long, DIV_ROUND_UP(parent, rate), clk->max_div);
        unsigned long result = parent / div;
 
        /*
@@ -442,12 +364,56 @@ static int jh7100_clk_set_rate(struct clk_hw *hw,
                               unsigned long parent_rate)
 {
        struct jh7100_clk *clk = jh7100_clk_from(hw);
-       unsigned long div = jh7100_clk_bestdiv(clk, rate, parent_rate);
+       unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
+                                 1UL, (unsigned long)clk->max_div);
 
        jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, div);
        return 0;
 }
 
+static unsigned long jh7100_clk_frac_recalc_rate(struct clk_hw *hw,
+                                                unsigned long parent_rate)
+{
+       struct jh7100_clk *clk = jh7100_clk_from(hw);
+       u32 reg = jh7100_clk_reg_get(clk);
+       unsigned long div100 = 100 * (reg & JH7100_CLK_INT_MASK) +
+                              ((reg & JH7100_CLK_FRAC_MASK) >> JH7100_CLK_FRAC_SHIFT);
+
+       return (div100 >= JH7100_CLK_FRAC_MIN) ? 100 * parent_rate / div100 : 0;
+}
+
+static int jh7100_clk_frac_determine_rate(struct clk_hw *hw,
+                                         struct clk_rate_request *req)
+{
+       unsigned long parent100 = 100 * req->best_parent_rate;
+       unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
+       unsigned long div100 = clamp(DIV_ROUND_CLOSEST(parent100, rate),
+                                    JH7100_CLK_FRAC_MIN, JH7100_CLK_FRAC_MAX);
+       unsigned long result = parent100 / div100;
+
+       /* clamp the result as in jh7100_clk_determine_rate() above */
+       if (result > req->max_rate && div100 < JH7100_CLK_FRAC_MAX)
+               result = parent100 / (div100 + 1);
+       if (result < req->min_rate && div100 > JH7100_CLK_FRAC_MIN)
+               result = parent100 / (div100 - 1);
+
+       req->rate = result;
+       return 0;
+}
+
+static int jh7100_clk_frac_set_rate(struct clk_hw *hw,
+                                   unsigned long rate,
+                                   unsigned long parent_rate)
+{
+       struct jh7100_clk *clk = jh7100_clk_from(hw);
+       unsigned long div100 = clamp(DIV_ROUND_CLOSEST(100 * parent_rate, rate),
+                                    JH7100_CLK_FRAC_MIN, JH7100_CLK_FRAC_MAX);
+       u32 value = ((div100 % 100) << JH7100_CLK_FRAC_SHIFT) | (div100 / 100);
+
+       jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, value);
+       return 0;
+}
+
 static u8 jh7100_clk_get_parent(struct clk_hw *hw)
 {
        struct jh7100_clk *clk = jh7100_clk_from(hw);
@@ -534,6 +500,13 @@ static const struct clk_ops jh7100_clk_div_ops = {
        .debug_init = jh7100_clk_debug_init,
 };
 
+static const struct clk_ops jh7100_clk_fdiv_ops = {
+       .recalc_rate = jh7100_clk_frac_recalc_rate,
+       .determine_rate = jh7100_clk_frac_determine_rate,
+       .set_rate = jh7100_clk_frac_set_rate,
+       .debug_init = jh7100_clk_debug_init,
+};
+
 static const struct clk_ops jh7100_clk_gdiv_ops = {
        .enable = jh7100_clk_enable,
        .disable = jh7100_clk_disable,
@@ -561,17 +534,45 @@ static const struct clk_ops jh7100_clk_gmux_ops = {
        .debug_init = jh7100_clk_debug_init,
 };
 
+static const struct clk_ops jh7100_clk_mdiv_ops = {
+       .recalc_rate = jh7100_clk_recalc_rate,
+       .determine_rate = jh7100_clk_determine_rate,
+       .get_parent = jh7100_clk_get_parent,
+       .set_parent = jh7100_clk_set_parent,
+       .set_rate = jh7100_clk_set_rate,
+       .debug_init = jh7100_clk_debug_init,
+};
+
+static const struct clk_ops jh7100_clk_gmd_ops = {
+       .enable = jh7100_clk_enable,
+       .disable = jh7100_clk_disable,
+       .is_enabled = jh7100_clk_is_enabled,
+       .recalc_rate = jh7100_clk_recalc_rate,
+       .determine_rate = jh7100_clk_determine_rate,
+       .get_parent = jh7100_clk_get_parent,
+       .set_parent = jh7100_clk_set_parent,
+       .set_rate = jh7100_clk_set_rate,
+       .debug_init = jh7100_clk_debug_init,
+};
+
 static const struct clk_ops jh7100_clk_inv_ops = {
        .get_phase = jh7100_clk_get_phase,
        .set_phase = jh7100_clk_set_phase,
        .debug_init = jh7100_clk_debug_init,
 };
 
-static const struct clk_ops *__init jh7100_clk_ops(u32 max)
+const struct clk_ops *starfive_jh7100_clk_ops(u32 max)
 {
        if (max & JH7100_CLK_DIV_MASK) {
+               if (max & JH7100_CLK_MUX_MASK) {
+                       if (max & JH7100_CLK_ENABLE)
+                               return &jh7100_clk_gmd_ops;
+                       return &jh7100_clk_mdiv_ops;
+               }
                if (max & JH7100_CLK_ENABLE)
                        return &jh7100_clk_gdiv_ops;
+               if (max == JH7100_CLK_FRAC_MAX)
+                       return &jh7100_clk_fdiv_ops;
                return &jh7100_clk_div_ops;
        }
 
@@ -586,6 +587,7 @@ static const struct clk_ops *__init jh7100_clk_ops(u32 max)
 
        return &jh7100_clk_inv_ops;
 }
+EXPORT_SYMBOL_GPL(starfive_jh7100_clk_ops);
 
 static struct clk_hw *jh7100_clk_get(struct of_phandle_args *clkspec, void *data)
 {
@@ -607,7 +609,7 @@ static int __init clk_starfive_jh7100_probe(struct platform_device *pdev)
        unsigned int idx;
        int ret;
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       priv = devm_kzalloc(&pdev->dev, struct_size(priv, reg, JH7100_CLK_PLL0_OUT), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
@@ -637,7 +639,7 @@ static int __init clk_starfive_jh7100_probe(struct platform_device *pdev)
                struct clk_parent_data parents[4] = {};
                struct clk_init_data init = {
                        .name = jh7100_clk_data[idx].name,
-                       .ops = jh7100_clk_ops(max),
+                       .ops = starfive_jh7100_clk_ops(max),
                        .parent_data = parents,
                        .num_parents = ((max & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT) + 1,
                        .flags = jh7100_clk_data[idx].flags,
diff --git a/drivers/clk/starfive/clk-starfive-jh7100.h b/drivers/clk/starfive/clk-starfive-jh7100.h
new file mode 100644 (file)
index 0000000..f116be5
--- /dev/null
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CLK_STARFIVE_JH7100_H
+#define __CLK_STARFIVE_JH7100_H
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+
+/* register fields */
+#define JH7100_CLK_ENABLE      BIT(31)
+#define JH7100_CLK_INVERT      BIT(30)
+#define JH7100_CLK_MUX_MASK    GENMASK(27, 24)
+#define JH7100_CLK_MUX_SHIFT   24
+#define JH7100_CLK_DIV_MASK    GENMASK(23, 0)
+#define JH7100_CLK_FRAC_MASK   GENMASK(15, 8)
+#define JH7100_CLK_FRAC_SHIFT  8
+#define JH7100_CLK_INT_MASK    GENMASK(7, 0)
+
+/* fractional divider min/max */
+#define JH7100_CLK_FRAC_MIN    100UL
+#define JH7100_CLK_FRAC_MAX    25599UL
+
+/* clock data */
+struct jh7100_clk_data {
+       const char *name;
+       unsigned long flags;
+       u32 max;
+       u8 parents[4];
+};
+
+#define JH7100_GATE(_idx, _name, _flags, _parent) [_idx] = {                   \
+       .name = _name,                                                          \
+       .flags = CLK_SET_RATE_PARENT | (_flags),                                \
+       .max = JH7100_CLK_ENABLE,                                               \
+       .parents = { [0] = _parent },                                           \
+}
+
+#define JH7100__DIV(_idx, _name, _max, _parent) [_idx] = {                     \
+       .name = _name,                                                          \
+       .flags = 0,                                                             \
+       .max = _max,                                                            \
+       .parents = { [0] = _parent },                                           \
+}
+
+#define JH7100_GDIV(_idx, _name, _flags, _max, _parent) [_idx] = {             \
+       .name = _name,                                                          \
+       .flags = _flags,                                                        \
+       .max = JH7100_CLK_ENABLE | (_max),                                      \
+       .parents = { [0] = _parent },                                           \
+}
+
+#define JH7100_FDIV(_idx, _name, _parent) [_idx] = {                           \
+       .name = _name,                                                          \
+       .flags = 0,                                                             \
+       .max = JH7100_CLK_FRAC_MAX,                                             \
+       .parents = { [0] = _parent },                                           \
+}
+
+#define JH7100__MUX(_idx, _name, _nparents, ...) [_idx] = {                    \
+       .name = _name,                                                          \
+       .flags = 0,                                                             \
+       .max = ((_nparents) - 1) << JH7100_CLK_MUX_SHIFT,                       \
+       .parents = { __VA_ARGS__ },                                             \
+}
+
+#define JH7100_GMUX(_idx, _name, _flags, _nparents, ...) [_idx] = {            \
+       .name = _name,                                                          \
+       .flags = _flags,                                                        \
+       .max = JH7100_CLK_ENABLE |                                              \
+               (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT),                    \
+       .parents = { __VA_ARGS__ },                                             \
+}
+
+#define JH7100_MDIV(_idx, _name, _max, _nparents, ...) [_idx] = {              \
+       .name = _name,                                                          \
+       .flags = 0,                                                             \
+       .max = (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT) | (_max),            \
+       .parents = { __VA_ARGS__ },                                             \
+}
+
+#define JH7100__GMD(_idx, _name, _flags, _max, _nparents, ...) [_idx] = {      \
+       .name = _name,                                                          \
+       .flags = _flags,                                                        \
+       .max = JH7100_CLK_ENABLE |                                              \
+               (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT) | (_max),           \
+       .parents = { __VA_ARGS__ },                                             \
+}
+
+#define JH7100__INV(_idx, _name, _parent) [_idx] = {                           \
+       .name = _name,                                                          \
+       .flags = CLK_SET_RATE_PARENT,                                           \
+       .max = JH7100_CLK_INVERT,                                               \
+       .parents = { [0] = _parent },                                           \
+}
+
+struct jh7100_clk {
+       struct clk_hw hw;
+       unsigned int idx;
+       unsigned int max_div;
+};
+
+struct jh7100_clk_priv {
+       /* protect clk enable and set rate/parent from happening at the same time */
+       spinlock_t rmw_lock;
+       struct device *dev;
+       void __iomem *base;
+       struct clk_hw *pll[3];
+       struct jh7100_clk reg[];
+};
+
+const struct clk_ops *starfive_jh7100_clk_ops(u32 max);
+
+#endif
index 68a94e5..4615376 100644 (file)
@@ -69,6 +69,11 @@ config SUN6I_A31_CCU
        default MACH_SUN6I
        depends on MACH_SUN6I || COMPILE_TEST
 
+config SUN6I_RTC_CCU
+       tristate "Support for the Allwinner H616/R329 RTC CCU"
+       default ARCH_SUNXI
+       depends on ARCH_SUNXI || COMPILE_TEST
+
 config SUN8I_A23_CCU
        tristate "Support for the Allwinner A23 CCU"
        default MACH_SUN8I
index ec931cb..6b3ae2b 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
 obj-$(CONFIG_SUN4I_A10_CCU)    += sun4i-a10-ccu.o
 obj-$(CONFIG_SUN5I_CCU)                += sun5i-ccu.o
 obj-$(CONFIG_SUN6I_A31_CCU)    += sun6i-a31-ccu.o
+obj-$(CONFIG_SUN6I_RTC_CCU)    += sun6i-rtc-ccu.o
 obj-$(CONFIG_SUN8I_A23_CCU)    += sun8i-a23-ccu.o
 obj-$(CONFIG_SUN8I_A33_CCU)    += sun8i-a33-ccu.o
 obj-$(CONFIG_SUN8I_A83T_CCU)   += sun8i-a83t-ccu.o
@@ -60,6 +61,7 @@ sun50i-h616-ccu-y             += ccu-sun50i-h616.o
 sun4i-a10-ccu-y                        += ccu-sun4i-a10.o
 sun5i-ccu-y                    += ccu-sun5i.o
 sun6i-a31-ccu-y                        += ccu-sun6i-a31.o
+sun6i-rtc-ccu-y                        += ccu-sun6i-rtc.o
 sun8i-a23-ccu-y                        += ccu-sun8i-a23.o
 sun8i-a33-ccu-y                        += ccu-sun8i-a33.o
 sun8i-a83t-ccu-y               += ccu-sun8i-a83t.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
new file mode 100644 (file)
index 0000000..8a10bad
--- /dev/null
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 Samuel Holland <samuel@sholland.org>
+//
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <linux/clk/sunxi-ng.h>
+
+#include "ccu_common.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mux.h"
+
+#include "ccu-sun6i-rtc.h"
+
+#define IOSC_ACCURACY                  300000000 /* 30% */
+#define IOSC_RATE                      16000000
+
+#define LOSC_RATE                      32768
+#define LOSC_RATE_SHIFT                        15
+
+#define LOSC_CTRL_REG                  0x0
+#define LOSC_CTRL_KEY                  0x16aa0000
+
+#define IOSC_32K_CLK_DIV_REG           0x8
+#define IOSC_32K_CLK_DIV               GENMASK(4, 0)
+#define IOSC_32K_PRE_DIV               32
+
+#define IOSC_CLK_CALI_REG              0xc
+#define IOSC_CLK_CALI_DIV_ONES         22
+#define IOSC_CLK_CALI_EN               BIT(1)
+#define IOSC_CLK_CALI_SRC_SEL          BIT(0)
+
+#define LOSC_OUT_GATING_REG            0x60
+
+#define DCXO_CTRL_REG                  0x160
+#define DCXO_CTRL_CLK16M_RC_EN         BIT(0)
+
+struct sun6i_rtc_match_data {
+       bool                            have_ext_osc32k         : 1;
+       bool                            have_iosc_calibration   : 1;
+       bool                            rtc_32k_single_parent   : 1;
+       const struct clk_parent_data    *osc32k_fanout_parents;
+       u8                              osc32k_fanout_nparents;
+};
+
+static bool have_iosc_calibration;
+
+static int ccu_iosc_enable(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_enable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static void ccu_iosc_disable(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_disable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static int ccu_iosc_is_enabled(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_is_enabled(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static unsigned long ccu_iosc_recalc_rate(struct clk_hw *hw,
+                                         unsigned long parent_rate)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       if (have_iosc_calibration) {
+               u32 reg = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /*
+                * Recover the IOSC frequency by shifting the ones place of
+                * (fixed-point divider * 32768) into bit zero.
+                */
+               if (reg & IOSC_CLK_CALI_EN)
+                       return reg >> (IOSC_CLK_CALI_DIV_ONES - LOSC_RATE_SHIFT);
+       }
+
+       return IOSC_RATE;
+}
+
+static unsigned long ccu_iosc_recalc_accuracy(struct clk_hw *hw,
+                                             unsigned long parent_accuracy)
+{
+       return IOSC_ACCURACY;
+}
+
+static const struct clk_ops ccu_iosc_ops = {
+       .enable                 = ccu_iosc_enable,
+       .disable                = ccu_iosc_disable,
+       .is_enabled             = ccu_iosc_is_enabled,
+       .recalc_rate            = ccu_iosc_recalc_rate,
+       .recalc_accuracy        = ccu_iosc_recalc_accuracy,
+};
+
+static struct ccu_common iosc_clk = {
+       .reg            = DCXO_CTRL_REG,
+       .hw.init        = CLK_HW_INIT_NO_PARENT("iosc", &ccu_iosc_ops,
+                                               CLK_GET_RATE_NOCACHE),
+};
+
+static int ccu_iosc_32k_prepare(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (!have_iosc_calibration)
+               return 0;
+
+       val = readl(cm->base + IOSC_CLK_CALI_REG);
+       writel(val | IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL,
+              cm->base + IOSC_CLK_CALI_REG);
+
+       return 0;
+}
+
+static void ccu_iosc_32k_unprepare(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (!have_iosc_calibration)
+               return;
+
+       val = readl(cm->base + IOSC_CLK_CALI_REG);
+       writel(val & ~(IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL),
+              cm->base + IOSC_CLK_CALI_REG);
+}
+
+static unsigned long ccu_iosc_32k_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (have_iosc_calibration) {
+               val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /* Assume the calibrated 32k clock is accurate. */
+               if (val & IOSC_CLK_CALI_SRC_SEL)
+                       return LOSC_RATE;
+       }
+
+       val = readl(cm->base + IOSC_32K_CLK_DIV_REG) & IOSC_32K_CLK_DIV;
+
+       return parent_rate / IOSC_32K_PRE_DIV / (val + 1);
+}
+
+static unsigned long ccu_iosc_32k_recalc_accuracy(struct clk_hw *hw,
+                                                 unsigned long parent_accuracy)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (have_iosc_calibration) {
+               val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /* Assume the calibrated 32k clock is accurate. */
+               if (val & IOSC_CLK_CALI_SRC_SEL)
+                       return 0;
+       }
+
+       return parent_accuracy;
+}
+
+static const struct clk_ops ccu_iosc_32k_ops = {
+       .prepare                = ccu_iosc_32k_prepare,
+       .unprepare              = ccu_iosc_32k_unprepare,
+       .recalc_rate            = ccu_iosc_32k_recalc_rate,
+       .recalc_accuracy        = ccu_iosc_32k_recalc_accuracy,
+};
+
+static struct ccu_common iosc_32k_clk = {
+       .hw.init        = CLK_HW_INIT_HW("iosc-32k", &iosc_clk.hw,
+                                        &ccu_iosc_32k_ops,
+                                        CLK_GET_RATE_NOCACHE),
+};
+
+static const struct clk_hw *ext_osc32k[] = { NULL }; /* updated during probe */
+
+static SUNXI_CCU_GATE_HWS(ext_osc32k_gate_clk, "ext-osc32k-gate",
+                         ext_osc32k, 0x0, BIT(4), 0);
+
+static const struct clk_hw *osc32k_parents[] = {
+       &iosc_32k_clk.hw,
+       &ext_osc32k_gate_clk.common.hw
+};
+
+static struct clk_init_data osc32k_init_data = {
+       .name           = "osc32k",
+       .ops            = &ccu_mux_ops,
+       .parent_hws     = osc32k_parents,
+       .num_parents    = ARRAY_SIZE(osc32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux osc32k_clk = {
+       .mux    = _SUNXI_CCU_MUX(0, 1),
+       .common = {
+               .reg            = LOSC_CTRL_REG,
+               .features       = CCU_FEATURE_KEY_FIELD,
+               .hw.init        = &osc32k_init_data,
+       },
+};
+
+/* This falls back to the global name for fwnodes without a named reference. */
+static const struct clk_parent_data osc24M[] = {
+       { .fw_name = "hosc", .name = "osc24M" }
+};
+
+static struct ccu_gate osc24M_32k_clk = {
+       .enable = BIT(16),
+       .common = {
+               .reg            = LOSC_OUT_GATING_REG,
+               .prediv         = 750,
+               .features       = CCU_FEATURE_ALL_PREDIV,
+               .hw.init        = CLK_HW_INIT_PARENTS_DATA("osc24M-32k", osc24M,
+                                                          &ccu_gate_ops, 0),
+       },
+};
+
+static const struct clk_hw *rtc_32k_parents[] = {
+       &osc32k_clk.common.hw,
+       &osc24M_32k_clk.common.hw
+};
+
+static struct clk_init_data rtc_32k_init_data = {
+       .name           = "rtc-32k",
+       .ops            = &ccu_mux_ops,
+       .parent_hws     = rtc_32k_parents,
+       .num_parents    = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux rtc_32k_clk = {
+       .mux    = _SUNXI_CCU_MUX(1, 1),
+       .common = {
+               .reg            = LOSC_CTRL_REG,
+               .features       = CCU_FEATURE_KEY_FIELD,
+               .hw.init        = &rtc_32k_init_data,
+       },
+};
+
+static struct clk_init_data osc32k_fanout_init_data = {
+       .name           = "osc32k-fanout",
+       .ops            = &ccu_mux_ops,
+       /* parents are set during probe */
+};
+
+static struct ccu_mux osc32k_fanout_clk = {
+       .enable = BIT(0),
+       .mux    = _SUNXI_CCU_MUX(1, 2),
+       .common = {
+               .reg            = LOSC_OUT_GATING_REG,
+               .hw.init        = &osc32k_fanout_init_data,
+       },
+};
+
+static struct ccu_common *sun6i_rtc_ccu_clks[] = {
+       &iosc_clk,
+       &iosc_32k_clk,
+       &ext_osc32k_gate_clk.common,
+       &osc32k_clk.common,
+       &osc24M_32k_clk.common,
+       &rtc_32k_clk.common,
+       &osc32k_fanout_clk.common,
+};
+
+static struct clk_hw_onecell_data sun6i_rtc_ccu_hw_clks = {
+       .num = CLK_NUMBER,
+       .hws = {
+               [CLK_OSC32K]            = &osc32k_clk.common.hw,
+               [CLK_OSC32K_FANOUT]     = &osc32k_fanout_clk.common.hw,
+               [CLK_IOSC]              = &iosc_clk.hw,
+               [CLK_IOSC_32K]          = &iosc_32k_clk.hw,
+               [CLK_EXT_OSC32K_GATE]   = &ext_osc32k_gate_clk.common.hw,
+               [CLK_OSC24M_32K]        = &osc24M_32k_clk.common.hw,
+               [CLK_RTC_32K]           = &rtc_32k_clk.common.hw,
+       },
+};
+
+static const struct sunxi_ccu_desc sun6i_rtc_ccu_desc = {
+       .ccu_clks       = sun6i_rtc_ccu_clks,
+       .num_ccu_clks   = ARRAY_SIZE(sun6i_rtc_ccu_clks),
+
+       .hw_clks        = &sun6i_rtc_ccu_hw_clks,
+};
+
+static const struct clk_parent_data sun50i_h6_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+};
+
+static const struct clk_parent_data sun50i_h616_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+       { .fw_name = "pll-32k" },
+       { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct clk_parent_data sun50i_r329_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+       { .hw = &ext_osc32k_gate_clk.common.hw },
+       { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct sun6i_rtc_match_data sun50i_h6_rtc_ccu_data = {
+       .have_ext_osc32k        = true,
+       .have_iosc_calibration  = true,
+       .osc32k_fanout_parents  = sun50i_h6_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h6_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_h616_rtc_ccu_data = {
+       .have_iosc_calibration  = true,
+       .rtc_32k_single_parent  = true,
+       .osc32k_fanout_parents  = sun50i_h616_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h616_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
+       .have_ext_osc32k        = true,
+       .osc32k_fanout_parents  = sun50i_r329_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
+static const struct of_device_id sun6i_rtc_ccu_match[] = {
+       {
+               .compatible     = "allwinner,sun50i-h6-rtc",
+               .data           = &sun50i_h6_rtc_ccu_data,
+       },
+       {
+               .compatible     = "allwinner,sun50i-h616-rtc",
+               .data           = &sun50i_h616_rtc_ccu_data,
+       },
+       {
+               .compatible     = "allwinner,sun50i-r329-rtc",
+               .data           = &sun50i_r329_rtc_ccu_data,
+       },
+};
+
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
+{
+       const struct sun6i_rtc_match_data *data;
+       struct clk *ext_osc32k_clk = NULL;
+       const struct of_device_id *match;
+
+       /* This driver is only used for newer variants of the hardware. */
+       match = of_match_device(sun6i_rtc_ccu_match, dev);
+       if (!match)
+               return 0;
+
+       data = match->data;
+       have_iosc_calibration = data->have_iosc_calibration;
+
+       if (data->have_ext_osc32k) {
+               const char *fw_name;
+
+               /* ext-osc32k was the only input clock in the old binding. */
+               fw_name = of_property_read_bool(dev->of_node, "clock-names")
+                       ? "ext-osc32k" : NULL;
+               ext_osc32k_clk = devm_clk_get_optional(dev, fw_name);
+               if (IS_ERR(ext_osc32k_clk))
+                       return PTR_ERR(ext_osc32k_clk);
+       }
+
+       if (ext_osc32k_clk) {
+               /* Link ext-osc32k-gate to its parent. */
+               *ext_osc32k = __clk_get_hw(ext_osc32k_clk);
+       } else {
+               /* ext-osc32k-gate is an orphan, so do not register it. */
+               sun6i_rtc_ccu_hw_clks.hws[CLK_EXT_OSC32K_GATE] = NULL;
+               osc32k_init_data.num_parents = 1;
+       }
+
+       if (data->rtc_32k_single_parent)
+               rtc_32k_init_data.num_parents = 1;
+
+       osc32k_fanout_init_data.parent_data = data->osc32k_fanout_parents;
+       osc32k_fanout_init_data.num_parents = data->osc32k_fanout_nparents;
+
+       return devm_sunxi_ccu_probe(dev, reg, &sun6i_rtc_ccu_desc);
+}
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h
new file mode 100644 (file)
index 0000000..9ae821f
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CCU_SUN6I_RTC_H
+#define _CCU_SUN6I_RTC_H
+
+#include <dt-bindings/clock/sun6i-rtc.h>
+
+#define CLK_IOSC_32K           3
+#define CLK_EXT_OSC32K_GATE    4
+#define CLK_OSC24M_32K         5
+#define CLK_RTC_32K            6
+
+#define CLK_NUMBER             (CLK_RTC_32K + 1)
+
+#endif /* _CCU_SUN6I_RTC_H */
index 98a1834..fbf16c6 100644 (file)
@@ -17,6 +17,7 @@
 #define CCU_FEATURE_LOCK_REG           BIT(5)
 #define CCU_FEATURE_MMC_TIMING_SWITCH  BIT(6)
 #define CCU_FEATURE_SIGMA_DELTA_MOD    BIT(7)
+#define CCU_FEATURE_KEY_FIELD          BIT(8)
 
 /* MMC timing mode switch bit */
 #define CCU_MMC_NEW_TIMING_MODE                BIT(30)
index 2306a1c..1d557e3 100644 (file)
@@ -12,6 +12,8 @@
 #include "ccu_gate.h"
 #include "ccu_mux.h"
 
+#define CCU_MUX_KEY_VALUE              0x16aa0000
+
 static u16 ccu_mux_get_prediv(struct ccu_common *common,
                              struct ccu_mux_internal *cm,
                              int parent_index)
@@ -191,6 +193,11 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
        spin_lock_irqsave(common->lock, flags);
 
        reg = readl(common->base + common->reg);
+
+       /* The key field always reads as zero. */
+       if (common->features & CCU_FEATURE_KEY_FIELD)
+               reg |= CCU_MUX_KEY_VALUE;
+
        reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift);
        writel(reg | (index << cm->shift), common->base + common->reg);
 
index 74c1d89..219c806 100644 (file)
@@ -198,6 +198,7 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
 
        tegra->emc = platform_get_drvdata(pdev);
        if (!tegra->emc) {
+               put_device(&pdev->dev);
                pr_err("%s: cannot find EMC driver\n", __func__);
                return NULL;
        }
index 5ca1e39..2c6315c 100644 (file)
@@ -6,8 +6,7 @@ clk-common                              = dpll.o composite.o divider.o gate.o \
                                          fixed-factor.o mux.o apll.o \
                                          clkt_dpll.o clkt_iclk.o clkt_dflt.o \
                                          clkctrl.o
-obj-$(CONFIG_SOC_AM33XX)               += $(clk-common) clk-33xx.o dpll3xxx.o \
-                                         clk-33xx-compat.o
+obj-$(CONFIG_SOC_AM33XX)               += $(clk-common) clk-33xx.o dpll3xxx.o
 obj-$(CONFIG_SOC_TI81XX)               += $(clk-common) fapll.o clk-814x.o clk-816x.o
 obj-$(CONFIG_ARCH_OMAP2)               += $(clk-common) interface.o clk-2xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o \
@@ -18,9 +17,9 @@ obj-$(CONFIG_SOC_OMAP5)                       += $(clk-common) clk-54xx.o \
                                           dpll3xxx.o dpll44xx.o
 obj-$(CONFIG_SOC_DRA7XX)               += $(clk-common) clk-7xx.o \
                                           clk-dra7-atl.o dpll3xxx.o \
-                                          dpll44xx.o clk-7xx-compat.o
-obj-$(CONFIG_SOC_AM43XX)               += $(clk-common) dpll3xxx.o clk-43xx.o \
-                                          clk-43xx-compat.o
+                                          dpll44xx.o
+
+obj-$(CONFIG_SOC_AM43XX)               += $(clk-common) dpll3xxx.o clk-43xx.o
 
 endif  # CONFIG_ARCH_OMAP2PLUS
 
index ac5bc88..e4db6b9 100644 (file)
@@ -139,6 +139,7 @@ static void __init omap_clk_register_apll(void *user,
        struct clk_hw *hw = user;
        struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
        struct dpll_data *ad = clk_hw->dpll_data;
+       const char *name;
        struct clk *clk;
        const struct clk_init_data *init = clk_hw->hw.init;
 
@@ -166,7 +167,8 @@ static void __init omap_clk_register_apll(void *user,
 
        ad->clk_bypass = __clk_get_hw(clk);
 
-       clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
+       name = ti_dt_clk_name(node);
+       clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
                kfree(init->parent_names);
@@ -198,7 +200,7 @@ static void __init of_dra7_apll_setup(struct device_node *node)
        clk_hw->dpll_data = ad;
        clk_hw->hw.init = init;
 
-       init->name = node->name;
+       init->name = ti_dt_clk_name(node);
        init->ops = &apll_ck_ops;
 
        init->num_parents = of_clk_get_parent_count(node);
@@ -347,6 +349,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
        struct dpll_data *ad = NULL;
        struct clk_hw_omap *clk_hw = NULL;
        struct clk_init_data *init = NULL;
+       const char *name;
        struct clk *clk;
        const char *parent_name;
        u32 val;
@@ -362,7 +365,8 @@ static void __init of_omap2_apll_setup(struct device_node *node)
        clk_hw->dpll_data = ad;
        clk_hw->hw.init = init;
        init->ops = &omap2_apll_ops;
-       init->name = node->name;
+       name = ti_dt_clk_name(node);
+       init->name = name;
        clk_hw->ops = &omap2_apll_hwops;
 
        init->num_parents = of_clk_get_parent_count(node);
@@ -403,7 +407,8 @@ static void __init of_omap2_apll_setup(struct device_node *node)
        if (ret)
                goto cleanup;
 
-       clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
+       name = ti_dt_clk_name(node);
+       clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
                kfree(init);
index f6f8a40..d6e5f15 100644 (file)
@@ -205,7 +205,7 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
                return -ENOMEM;
 
        clk->shift = shift;
-       clk->name = node->name;
+       clk->name = ti_dt_clk_name(node);
        ret = ti_clk_get_reg_addr(node, 0, &clk->reg);
        if (ret) {
                kfree(clk);
diff --git a/drivers/clk/ti/clk-33xx-compat.c b/drivers/clk/ti/clk-33xx-compat.c
deleted file mode 100644 (file)
index 3e07f12..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * AM33XX Clock init
- *
- * Copyright (C) 2013 Texas Instruments, Inc
- *     Tero Kristo (t-kristo@ti.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/clk/ti.h>
-#include <dt-bindings/clock/am3.h>
-
-#include "clock.h"
-
-static const char * const am3_gpio1_dbclk_parents[] __initconst = {
-       "l4_per_cm:clk:0138:0",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data am3_gpio2_bit_data[] __initconst = {
-       { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data am3_gpio3_bit_data[] __initconst = {
-       { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
-       { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = {
-       { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
-       { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },
-       { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },
-       { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" },
-       { AM3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
-       { AM3_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" },
-       { AM3_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
-       { AM3_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" },
-       { AM3_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM3_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
-       { AM3_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
-       { AM3_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
-       { AM3_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
-       { AM3_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
-       { AM3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" },
-       { AM3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
-       { AM3_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
-       { AM3_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" },
-       { AM3_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
-       { AM3_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
-       { AM3_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
-       { AM3_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" },
-       { AM3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM3_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l4hs_clkdm" },
-       { AM3_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM3_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck", "clk_24mhz_clkdm" },
-       { 0 },
-};
-
-static const char * const am3_gpio0_dbclk_parents[] __initconst = {
-       "gpio0_dbclk_mux_ck",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data am3_gpio1_bit_data[] __initconst = {
-       { 18, TI_CLK_GATE, am3_gpio0_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const char * const am3_dbg_sysclk_ck_parents[] __initconst = {
-       "sys_clkin_ck",
-       NULL,
-};
-
-static const char * const am3_trace_pmd_clk_mux_ck_parents[] __initconst = {
-       "l4_wkup_cm:clk:0010:19",
-       "l4_wkup_cm:clk:0010:30",
-       NULL,
-};
-
-static const char * const am3_trace_clk_div_ck_parents[] __initconst = {
-       "l4_wkup_cm:clk:0010:20",
-       NULL,
-};
-
-static const struct omap_clkctrl_div_data am3_trace_clk_div_ck_data __initconst = {
-       .max_div = 64,
-       .flags = CLK_DIVIDER_POWER_OF_TWO,
-};
-
-static const char * const am3_stm_clk_div_ck_parents[] __initconst = {
-       "l4_wkup_cm:clk:0010:22",
-       NULL,
-};
-
-static const struct omap_clkctrl_div_data am3_stm_clk_div_ck_data __initconst = {
-       .max_div = 64,
-       .flags = CLK_DIVIDER_POWER_OF_TWO,
-};
-
-static const char * const am3_dbg_clka_ck_parents[] __initconst = {
-       "dpll_core_m4_ck",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data am3_debugss_bit_data[] __initconst = {
-       { 19, TI_CLK_GATE, am3_dbg_sysclk_ck_parents, NULL },
-       { 20, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL },
-       { 22, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL },
-       { 24, TI_CLK_DIVIDER, am3_trace_clk_div_ck_parents, &am3_trace_clk_div_ck_data },
-       { 27, TI_CLK_DIVIDER, am3_stm_clk_div_ck_parents, &am3_stm_clk_div_ck_data },
-       { 30, TI_CLK_GATE, am3_dbg_clka_ck_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = {
-       { AM3_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
-       { AM3_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
-       { AM3_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
-       { AM3_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0010:24", "l3_aon_clkdm" },
-       { AM3_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck", "l4_wkup_aon_clkdm" },
-       { AM3_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
-       { AM3_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
-       { AM3_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" },
-       { AM3_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" },
-       { AM3_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
-       { AM3_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" },
-       { AM3_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
-       { AM3_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
-       { AM3_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = {
-       { AM3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am3_l4_cefuse_clkctrl_regs[] __initconst = {
-       { AM3_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
-       { 0 },
-};
-
-const struct omap_clkctrl_data am3_clkctrl_compat_data[] __initconst = {
-       { 0x44e00014, am3_l4_per_clkctrl_regs },
-       { 0x44e00404, am3_l4_wkup_clkctrl_regs },
-       { 0x44e00604, am3_mpu_clkctrl_regs },
-       { 0x44e00800, am3_l4_rtc_clkctrl_regs },
-       { 0x44e00904, am3_gfx_l3_clkctrl_regs },
-       { 0x44e00a20, am3_l4_cefuse_clkctrl_regs },
-       { 0 },
-};
-
-struct ti_dt_clk am33xx_compat_clks[] = {
-       DT_CLK(NULL, "timer_32k_ck", "l4_per_cm:0138:0"),
-       DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
-       DT_CLK(NULL, "clkdiv32k_ick", "l4_per_cm:0138:0"),
-       DT_CLK(NULL, "dbg_clka_ck", "l4_wkup_cm:0010:30"),
-       DT_CLK(NULL, "dbg_sysclk_ck", "l4_wkup_cm:0010:19"),
-       DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0004:18"),
-       DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0098:18"),
-       DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:009c:18"),
-       DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:00a0:18"),
-       DT_CLK(NULL, "stm_clk_div_ck", "l4_wkup_cm:0010:27"),
-       DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l4_wkup_cm:0010:22"),
-       DT_CLK(NULL, "trace_clk_div_ck", "l4_wkup_cm:0010:24"),
-       DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l4_wkup_cm:0010:20"),
-       { .node_name = NULL },
-};
index f2c2212..b4d142a 100644 (file)
@@ -279,10 +279,7 @@ int __init am33xx_dt_clk_init(void)
 {
        struct clk *clk1, *clk2;
 
-       if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-               ti_dt_clocks_register(am33xx_compat_clks);
-       else
-               ti_dt_clocks_register(am33xx_clks);
+       ti_dt_clocks_register(am33xx_clks);
 
        omap2_clk_disable_autoidle_all();
 
diff --git a/drivers/clk/ti/clk-43xx-compat.c b/drivers/clk/ti/clk-43xx-compat.c
deleted file mode 100644 (file)
index 5130398..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * AM43XX Clock init
- *
- * Copyright (C) 2013 Texas Instruments, Inc
- *     Tero Kristo (t-kristo@ti.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/clk/ti.h>
-#include <dt-bindings/clock/am4.h>
-
-#include "clock.h"
-
-static const char * const am4_synctimer_32kclk_parents[] __initconst = {
-       "mux_synctimer32k_ck",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data am4_counter_32k_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_synctimer_32kclk_parents, NULL },
-       { 0 },
-};
-
-static const char * const am4_gpio0_dbclk_parents[] __initconst = {
-       "gpio0_dbclk_mux_ck",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data am4_gpio1_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_gpio0_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am4_l4_wkup_clkctrl_regs[] __initconst = {
-       { AM4_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck", "l3s_tsc_clkdm" },
-       { AM4_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
-       { AM4_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "sys_clkin_ck" },
-       { AM4_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0210:8" },
-       { AM4_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck", "l4_wkup_clkdm" },
-       { AM4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck", "l4_wkup_clkdm" },
-       { AM4_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" },
-       { AM4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" },
-       { AM4_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck", "l4_wkup_clkdm" },
-       { AM4_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck", "l4_wkup_clkdm" },
-       { AM4_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
-       { AM4_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = {
-       { AM4_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = {
-       { AM4_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = {
-       { AM4_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
-       { 0 },
-};
-
-static const char * const am4_usb_otg_ss0_refclk960m_parents[] __initconst = {
-       "dpll_per_clkdcoldo",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data am4_usb_otg_ss0_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data am4_usb_otg_ss1_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL },
-       { 0 },
-};
-
-static const char * const am4_gpio1_dbclk_parents[] __initconst = {
-       "clkdiv32k_ick",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data am4_gpio2_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data am4_gpio3_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data am4_gpio4_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data am4_gpio5_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data am4_gpio6_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst = {
-       { AM4_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" },
-       { AM4_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" },
-       { AM4_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" },
-       { AM4_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
-       { AM4_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l3_clkdm" },
-       { AM4_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
-       { AM4_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" },
-       { AM4_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" },
-       { AM4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" },
-       { AM4_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
-       { AM4_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
-       { AM4_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
-       { AM4_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" },
-       { AM4_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
-       { AM4_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
-       { AM4_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" },
-       { AM4_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
-       { AM4_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
-       { AM4_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
-       { AM4_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
-       { AM4_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
-       { AM4_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
-       { AM4_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
-       { AM4_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
-       { AM4_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
-       { AM4_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" },
-       { AM4_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" },
-       { AM4_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" },
-       { AM4_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" },
-       { AM4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
-       { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
-       { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" },
-       { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },
-       { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
-       { 0 },
-};
-
-const struct omap_clkctrl_data am4_clkctrl_compat_data[] __initconst = {
-       { 0x44df2820, am4_l4_wkup_clkctrl_regs },
-       { 0x44df8320, am4_mpu_clkctrl_regs },
-       { 0x44df8420, am4_gfx_l3_clkctrl_regs },
-       { 0x44df8520, am4_l4_rtc_clkctrl_regs },
-       { 0x44df8820, am4_l4_per_clkctrl_regs },
-       { 0 },
-};
-
-const struct omap_clkctrl_data am438x_clkctrl_compat_data[] __initconst = {
-       { 0x44df2820, am4_l4_wkup_clkctrl_regs },
-       { 0x44df8320, am4_mpu_clkctrl_regs },
-       { 0x44df8420, am4_gfx_l3_clkctrl_regs },
-       { 0x44df8820, am4_l4_per_clkctrl_regs },
-       { 0 },
-};
-
-struct ti_dt_clk am43xx_compat_clks[] = {
-       DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
-       DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
-       DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0348:8"),
-       DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0458:8"),
-       DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0460:8"),
-       DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0468:8"),
-       DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0470:8"),
-       DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0478:8"),
-       DT_CLK(NULL, "synctimer_32kclk", "l4_wkup_cm:0210:8"),
-       DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l4_per_cm:0240:8"),
-       DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l4_per_cm:0248:8"),
-       { .node_name = NULL },
-};
index 6e97a54..2ff4ff3 100644 (file)
@@ -282,10 +282,7 @@ int __init am43xx_dt_clk_init(void)
 {
        struct clk *clk1, *clk2;
 
-       if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-               ti_dt_clocks_register(am43xx_compat_clks);
-       else
-               ti_dt_clocks_register(am43xx_clks);
+       ti_dt_clocks_register(am43xx_clks);
 
        omap2_clk_disable_autoidle_all();
 
diff --git a/drivers/clk/ti/clk-7xx-compat.c b/drivers/clk/ti/clk-7xx-compat.c
deleted file mode 100644 (file)
index ddf7c82..0000000
+++ /dev/null
@@ -1,820 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * DRA7 Clock init
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- *
- * Tero Kristo (t-kristo@ti.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk/ti.h>
-#include <dt-bindings/clock/dra7.h>
-
-#include "clock.h"
-
-#define DRA7_DPLL_GMAC_DEFFREQ                         1000000000
-#define DRA7_DPLL_USB_DEFFREQ                          960000000
-
-static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst = {
-       { DRA7_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
-       { 0 },
-};
-
-static const char * const dra7_mcasp1_aux_gfclk_mux_parents[] __initconst = {
-       "per_abe_x1_gfclk2_div",
-       "video1_clk2_div",
-       "video2_clk2_div",
-       "hdmi_clk2_div",
-       NULL,
-};
-
-static const char * const dra7_mcasp1_ahclkx_mux_parents[] __initconst = {
-       "abe_24m_fclk",
-       "abe_sys_clk_div",
-       "func_24m_clk",
-       "atl_clkin3_ck",
-       "atl_clkin2_ck",
-       "atl_clkin1_ck",
-       "atl_clkin0_ck",
-       "sys_clkin2",
-       "ref_clkin0_ck",
-       "ref_clkin1_ck",
-       "ref_clkin2_ck",
-       "ref_clkin3_ck",
-       "mlb_clk",
-       "mlbp_clk",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp1_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_timer5_gfclk_mux_parents[] __initconst = {
-       "timer_sys_clk_div",
-       "sys_32k_ck",
-       "sys_clkin2",
-       "ref_clkin0_ck",
-       "ref_clkin1_ck",
-       "ref_clkin2_ck",
-       "ref_clkin3_ck",
-       "abe_giclk_div",
-       "video1_div_clk",
-       "video2_div_clk",
-       "hdmi_div_clk",
-       "clkoutmux0_clk_mux",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer5_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer6_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer7_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer8_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_uart6_gfclk_mux_parents[] __initconst = {
-       "func_48m_fclk",
-       "dpll_per_m2x2_ck",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart6_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst = {
-       { DRA7_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0010:22" },
-       { DRA7_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0018:24" },
-       { DRA7_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0020:24" },
-       { DRA7_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0028:24" },
-       { DRA7_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0030:24" },
-       { DRA7_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
-       { DRA7_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0040:24" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst = {
-       { DRA7_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
-       { DRA7_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
-       { DRA7_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initconst = {
-       { DRA7_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_dma_clkctrl_regs[] __initconst = {
-       { DRA7_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_emif_clkctrl_regs[] __initconst = {
-       { DRA7_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { 0 },
-};
-
-static const char * const dra7_atl_dpll_clk_mux_parents[] __initconst = {
-       "sys_32k_ck",
-       "video1_clkin_ck",
-       "video2_clkin_ck",
-       "hdmi_clkin_ck",
-       NULL,
-};
-
-static const char * const dra7_atl_gfclk_mux_parents[] __initconst = {
-       "l3_iclk_div",
-       "dpll_abe_m2_ck",
-       "atl_cm:clk:0000:24",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_atl_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_atl_dpll_clk_mux_parents, NULL },
-       { 26, TI_CLK_MUX, dra7_atl_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_atl_clkctrl_regs[] __initconst = {
-       { DRA7_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl_cm:clk:0000:26" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_l4cfg_clkctrl_regs[] __initconst = {
-       { DRA7_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initconst = {
-       { DRA7_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
-       { 0 },
-};
-
-static const char * const dra7_dss_dss_clk_parents[] __initconst = {
-       "dpll_per_h12x2_ck",
-       NULL,
-};
-
-static const char * const dra7_dss_48mhz_clk_parents[] __initconst = {
-       "func_48m_fclk",
-       NULL,
-};
-
-static const char * const dra7_dss_hdmi_clk_parents[] __initconst = {
-       "hdmi_dpll_clk_mux",
-       NULL,
-};
-
-static const char * const dra7_dss_32khz_clk_parents[] __initconst = {
-       "sys_32k_ck",
-       NULL,
-};
-
-static const char * const dra7_dss_video1_clk_parents[] __initconst = {
-       "video1_dpll_clk_mux",
-       NULL,
-};
-
-static const char * const dra7_dss_video2_clk_parents[] __initconst = {
-       "video2_dpll_clk_mux",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_dss_core_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_dss_clk_parents, NULL },
-       { 9, TI_CLK_GATE, dra7_dss_48mhz_clk_parents, NULL },
-       { 10, TI_CLK_GATE, dra7_dss_hdmi_clk_parents, NULL },
-       { 11, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 12, TI_CLK_GATE, dra7_dss_video1_clk_parents, NULL },
-       { 13, TI_CLK_GATE, dra7_dss_video2_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst = {
-       { DRA7_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" },
-       { DRA7_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" },
-       { 0 },
-};
-
-static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
-       "func_128m_clk",
-       "dpll_per_m2x2_ck",
-       NULL,
-};
-
-static const char * const dra7_mmc1_fclk_div_parents[] __initconst = {
-       "l3init_cm:clk:0008:24",
-       NULL,
-};
-
-static const struct omap_clkctrl_div_data dra7_mmc1_fclk_div_data __initconst = {
-       .max_div = 4,
-       .flags = CLK_DIVIDER_POWER_OF_TWO,
-};
-
-static const struct omap_clkctrl_bit_data dra7_mmc1_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
-       { 25, TI_CLK_DIVIDER, dra7_mmc1_fclk_div_parents, &dra7_mmc1_fclk_div_data },
-       { 0 },
-};
-
-static const char * const dra7_mmc2_fclk_div_parents[] __initconst = {
-       "l3init_cm:clk:0010:24",
-       NULL,
-};
-
-static const struct omap_clkctrl_div_data dra7_mmc2_fclk_div_data __initconst = {
-       .max_div = 4,
-       .flags = CLK_DIVIDER_POWER_OF_TWO,
-};
-
-static const struct omap_clkctrl_bit_data dra7_mmc2_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
-       { 25, TI_CLK_DIVIDER, dra7_mmc2_fclk_div_parents, &dra7_mmc2_fclk_div_data },
-       { 0 },
-};
-
-static const char * const dra7_usb_otg_ss2_refclk960m_parents[] __initconst = {
-       "l3init_960m_gfclk",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_usb_otg_ss2_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_sata_ref_clk_parents[] __initconst = {
-       "sys_clkin1",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_sata_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_sata_ref_clk_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_optfclk_pciephy1_clk_parents[] __initconst = {
-       "apll_pcie_ck",
-       NULL,
-};
-
-static const char * const dra7_optfclk_pciephy1_div_clk_parents[] __initconst = {
-       "optfclk_pciephy_div",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_pcie1_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
-       { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_pcie2_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
-       { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_rmii_50mhz_clk_mux_parents[] __initconst = {
-       "dpll_gmac_h11x2_ck",
-       "rmii_clk_ck",
-       NULL,
-};
-
-static const char * const dra7_gmac_rft_clk_mux_parents[] __initconst = {
-       "video1_clkin_ck",
-       "video2_clkin_ck",
-       "dpll_abe_m2_ck",
-       "hdmi_clkin_ck",
-       "l3_iclk_div",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_rmii_50mhz_clk_mux_parents, NULL },
-       { 25, TI_CLK_MUX, dra7_gmac_rft_clk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = {
-       { DRA7_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" },
-       { DRA7_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
-       { DRA7_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
-       { DRA7_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
-       { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_DRA74 | CLKF_SOC_DRA76, "dpll_core_h13x2_ck" },
-       { DRA7_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
-       { DRA7_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
-       { DRA7_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
-       { DRA7_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck", "gmac_clkdm" },
-       { DRA7_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
-       { DRA7_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
-       { DRA7_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
-       { 0 },
-};
-
-static const char * const dra7_timer10_gfclk_mux_parents[] __initconst = {
-       "timer_sys_clk_div",
-       "sys_32k_ck",
-       "sys_clkin2",
-       "ref_clkin0_ck",
-       "ref_clkin1_ck",
-       "ref_clkin2_ck",
-       "ref_clkin3_ck",
-       "abe_giclk_div",
-       "video1_div_clk",
-       "video2_div_clk",
-       "hdmi_div_clk",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer10_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer11_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer2_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer3_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer4_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer9_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio2_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio3_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio4_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio5_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio6_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio7_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio8_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_mmc3_gfclk_div_parents[] __initconst = {
-       "l4per_cm:clk:0120:24",
-       NULL,
-};
-
-static const struct omap_clkctrl_div_data dra7_mmc3_gfclk_div_data __initconst = {
-       .max_div = 4,
-       .flags = CLK_DIVIDER_POWER_OF_TWO,
-};
-
-static const struct omap_clkctrl_bit_data dra7_mmc3_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 25, TI_CLK_DIVIDER, dra7_mmc3_gfclk_div_parents, &dra7_mmc3_gfclk_div_data },
-       { 0 },
-};
-
-static const char * const dra7_mmc4_gfclk_div_parents[] __initconst = {
-       "l4per_cm:clk:0128:24",
-       NULL,
-};
-
-static const struct omap_clkctrl_div_data dra7_mmc4_gfclk_div_data __initconst = {
-       .max_div = 4,
-       .flags = CLK_DIVIDER_POWER_OF_TWO,
-};
-
-static const struct omap_clkctrl_bit_data dra7_mmc4_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 25, TI_CLK_DIVIDER, dra7_mmc4_gfclk_div_parents, &dra7_mmc4_gfclk_div_data },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_qspi_gfclk_mux_parents[] __initconst = {
-       "func_128m_clk",
-       "dpll_per_h13x2_ck",
-       NULL,
-};
-
-static const char * const dra7_qspi_gfclk_div_parents[] __initconst = {
-       "l4per_cm:clk:0138:24",
-       NULL,
-};
-
-static const struct omap_clkctrl_div_data dra7_qspi_gfclk_div_data __initconst = {
-       .max_div = 4,
-       .flags = CLK_DIVIDER_POWER_OF_TWO,
-};
-
-static const struct omap_clkctrl_bit_data dra7_qspi_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_qspi_gfclk_mux_parents, NULL },
-       { 25, TI_CLK_DIVIDER, dra7_qspi_gfclk_div_parents, &dra7_qspi_gfclk_div_data },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp2_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp3_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp5_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp8_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp4_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart7_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart8_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart9_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp6_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_mcasp7_bit_data[] __initconst = {
-       { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
-       { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = {
-       { DRA7_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per2_clkdm" },
-       { DRA7_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per3_clkdm" },
-       { DRA7_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" },
-       { DRA7_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" },
-       { DRA7_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0038:24" },
-       { DRA7_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0040:24" },
-       { DRA7_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0048:24" },
-       { DRA7_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0050:24" },
-       { DRA7_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
-       { DRA7_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
-       { DRA7_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
-       { DRA7_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
-       { DRA7_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
-       { DRA7_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
-       { DRA7_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
-       { DRA7_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" },
-       { DRA7_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
-       { DRA7_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00c8:24", "l4per3_clkdm" },
-       { DRA7_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d0:24", "l4per3_clkdm" },
-       { DRA7_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d8:24", "l4per3_clkdm" },
-       { DRA7_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
-       { DRA7_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
-       { DRA7_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
-       { DRA7_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
-       { DRA7_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
-       { DRA7_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0120:25" },
-       { DRA7_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0128:25" },
-       { DRA7_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0130:24", "l4per3_clkdm" },
-       { DRA7_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0138:25", "l4per2_clkdm" },
-       { DRA7_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0140:24" },
-       { DRA7_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0148:24" },
-       { DRA7_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0150:24" },
-       { DRA7_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0158:24" },
-       { DRA7_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0160:22", "l4per2_clkdm" },
-       { DRA7_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0168:22", "l4per2_clkdm" },
-       { DRA7_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0170:24" },
-       { DRA7_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0178:22", "l4per2_clkdm" },
-       { DRA7_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0190:24", "l4per2_clkdm" },
-       { DRA7_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0198:22", "l4per2_clkdm" },
-       { DRA7_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
-       { DRA7_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
-       { DRA7_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
-       { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_iclk_div", "l4sec_clkdm" },
-       { DRA7_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
-       { DRA7_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01d0:24", "l4per2_clkdm" },
-       { DRA7_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e0:24", "l4per2_clkdm" },
-       { DRA7_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e8:24", "l4per2_clkdm" },
-       { DRA7_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1", "l4per2_clkdm" },
-       { DRA7_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0204:22", "l4per2_clkdm" },
-       { DRA7_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0208:22", "l4per2_clkdm" },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_gpio1_bit_data[] __initconst = {
-       { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer1_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart10_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
-       { 0 },
-};
-
-static const char * const dra7_dcan1_sys_clk_mux_parents[] __initconst = {
-       "sys_clkin1",
-       "sys_clkin2",
-       NULL,
-};
-
-static const struct omap_clkctrl_bit_data dra7_dcan1_bit_data[] __initconst = {
-       { 24, TI_CLK_MUX, dra7_dcan1_sys_clk_mux_parents, NULL },
-       { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initconst = {
-       { DRA7_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
-       { DRA7_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
-       { DRA7_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
-       { DRA7_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
-       { DRA7_TIMER12_CLKCTRL, NULL, CLKF_SOC_NONSEC, "secure_32k_clk_src_ck" },
-       { DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
-       { DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" },
-       { DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" },
-       { DRA7_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk"},
-       { 0 },
-};
-
-const struct omap_clkctrl_data dra7_clkctrl_compat_data[] __initconst = {
-       { 0x4a005320, dra7_mpu_clkctrl_regs },
-       { 0x4a005540, dra7_ipu_clkctrl_regs },
-       { 0x4a005740, dra7_rtc_clkctrl_regs },
-       { 0x4a008620, dra7_coreaon_clkctrl_regs },
-       { 0x4a008720, dra7_l3main1_clkctrl_regs },
-       { 0x4a008a20, dra7_dma_clkctrl_regs },
-       { 0x4a008b20, dra7_emif_clkctrl_regs },
-       { 0x4a008c00, dra7_atl_clkctrl_regs },
-       { 0x4a008d20, dra7_l4cfg_clkctrl_regs },
-       { 0x4a008e20, dra7_l3instr_clkctrl_regs },
-       { 0x4a009120, dra7_dss_clkctrl_regs },
-       { 0x4a009320, dra7_l3init_clkctrl_regs },
-       { 0x4a009700, dra7_l4per_clkctrl_regs },
-       { 0x4ae07820, dra7_wkupaon_clkctrl_regs },
-       { 0 },
-};
-
-struct ti_dt_clk dra7xx_compat_clks[] = {
-       DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
-       DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"),
-       DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
-       DT_CLK(NULL, "atl_dpll_clk_mux", "atl_cm:0000:24"),
-       DT_CLK(NULL, "atl_gfclk_mux", "atl_cm:0000:26"),
-       DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon_cm:0068:24"),
-       DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"),
-       DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"),
-       DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"),
-       DT_CLK(NULL, "dss_hdmi_clk", "dss_cm:0000:10"),
-       DT_CLK(NULL, "dss_video1_clk", "dss_cm:0000:12"),
-       DT_CLK(NULL, "dss_video2_clk", "dss_cm:0000:13"),
-       DT_CLK(NULL, "gmac_rft_clk_mux", "l3init_cm:00b0:25"),
-       DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"),
-       DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0060:8"),
-       DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0068:8"),
-       DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0070:8"),
-       DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0078:8"),
-       DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0080:8"),
-       DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:0110:8"),
-       DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:0118:8"),
-       DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu_cm:0010:28"),
-       DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu_cm:0010:24"),
-       DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu_cm:0010:22"),
-       DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per_cm:0160:28"),
-       DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per_cm:0160:24"),
-       DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per_cm:0160:22"),
-       DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per_cm:0168:24"),
-       DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per_cm:0168:22"),
-       DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per_cm:0198:24"),
-       DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per_cm:0198:22"),
-       DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per_cm:0178:24"),
-       DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per_cm:0178:22"),
-       DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per_cm:0204:24"),
-       DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per_cm:0204:22"),
-       DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per_cm:0208:24"),
-       DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per_cm:0208:22"),
-       DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per_cm:0190:22"),
-       DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per_cm:0190:24"),
-       DT_CLK(NULL, "mmc1_clk32k", "l3init_cm:0008:8"),
-       DT_CLK(NULL, "mmc1_fclk_div", "l3init_cm:0008:25"),
-       DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"),
-       DT_CLK(NULL, "mmc2_clk32k", "l3init_cm:0010:8"),
-       DT_CLK(NULL, "mmc2_fclk_div", "l3init_cm:0010:25"),
-       DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"),
-       DT_CLK(NULL, "mmc3_clk32k", "l4per_cm:0120:8"),
-       DT_CLK(NULL, "mmc3_gfclk_div", "l4per_cm:0120:25"),
-       DT_CLK(NULL, "mmc3_gfclk_mux", "l4per_cm:0120:24"),
-       DT_CLK(NULL, "mmc4_clk32k", "l4per_cm:0128:8"),
-       DT_CLK(NULL, "mmc4_gfclk_div", "l4per_cm:0128:25"),
-       DT_CLK(NULL, "mmc4_gfclk_mux", "l4per_cm:0128:24"),
-       DT_CLK(NULL, "optfclk_pciephy1_32khz", "l3init_cm:0090:8"),
-       DT_CLK(NULL, "optfclk_pciephy1_clk", "l3init_cm:0090:9"),
-       DT_CLK(NULL, "optfclk_pciephy1_div_clk", "l3init_cm:0090:10"),
-       DT_CLK(NULL, "optfclk_pciephy2_32khz", "l3init_cm:0098:8"),
-       DT_CLK(NULL, "optfclk_pciephy2_clk", "l3init_cm:0098:9"),
-       DT_CLK(NULL, "optfclk_pciephy2_div_clk", "l3init_cm:0098:10"),
-       DT_CLK(NULL, "qspi_gfclk_div", "l4per_cm:0138:25"),
-       DT_CLK(NULL, "qspi_gfclk_mux", "l4per_cm:0138:24"),
-       DT_CLK(NULL, "rmii_50mhz_clk_mux", "l3init_cm:00b0:24"),
-       DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"),
-       DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0028:24"),
-       DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0030:24"),
-       DT_CLK(NULL, "timer13_gfclk_mux", "l4per_cm:00c8:24"),
-       DT_CLK(NULL, "timer14_gfclk_mux", "l4per_cm:00d0:24"),
-       DT_CLK(NULL, "timer15_gfclk_mux", "l4per_cm:00d8:24"),
-       DT_CLK(NULL, "timer16_gfclk_mux", "l4per_cm:0130:24"),
-       DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"),
-       DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0038:24"),
-       DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0040:24"),
-       DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0048:24"),
-       DT_CLK(NULL, "timer5_gfclk_mux", "ipu_cm:0018:24"),
-       DT_CLK(NULL, "timer6_gfclk_mux", "ipu_cm:0020:24"),
-       DT_CLK(NULL, "timer7_gfclk_mux", "ipu_cm:0028:24"),
-       DT_CLK(NULL, "timer8_gfclk_mux", "ipu_cm:0030:24"),
-       DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0050:24"),
-       DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon_cm:0060:24"),
-       DT_CLK(NULL, "uart1_gfclk_mux", "l4per_cm:0140:24"),
-       DT_CLK(NULL, "uart2_gfclk_mux", "l4per_cm:0148:24"),
-       DT_CLK(NULL, "uart3_gfclk_mux", "l4per_cm:0150:24"),
-       DT_CLK(NULL, "uart4_gfclk_mux", "l4per_cm:0158:24"),
-       DT_CLK(NULL, "uart5_gfclk_mux", "l4per_cm:0170:24"),
-       DT_CLK(NULL, "uart6_gfclk_mux", "ipu_cm:0040:24"),
-       DT_CLK(NULL, "uart7_gfclk_mux", "l4per_cm:01d0:24"),
-       DT_CLK(NULL, "uart8_gfclk_mux", "l4per_cm:01e0:24"),
-       DT_CLK(NULL, "uart9_gfclk_mux", "l4per_cm:01e8:24"),
-       DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init_cm:00d0:8"),
-       DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init_cm:0020:8"),
-       { .node_name = NULL },
-};
index 8b9118c..0f09944 100644 (file)
@@ -946,10 +946,7 @@ int __init dra7xx_dt_clk_init(void)
        int rc;
        struct clk *dpll_ck, *hdcp_ck;
 
-       if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-               ti_dt_clocks_register(dra7xx_compat_clks);
-       else
-               ti_dt_clocks_register(dra7xx_clks);
+       ti_dt_clocks_register(dra7xx_clks);
 
        omap2_clk_disable_autoidle_all();
 
index 8d4c08b..aa0950c 100644 (file)
@@ -173,6 +173,7 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
        struct dra7_atl_desc *clk_hw = NULL;
        struct clk_init_data init = { NULL };
        const char **parent_names = NULL;
+       const char *name;
        struct clk *clk;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
@@ -183,7 +184,8 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
 
        clk_hw->hw.init = &init;
        clk_hw->divider = 1;
-       init.name = node->name;
+       name = ti_dt_clk_name(node);
+       init.name = name;
        init.ops = &atl_clk_ops;
        init.flags = CLK_IGNORE_UNUSED;
        init.num_parents = of_clk_get_parent_count(node);
@@ -203,7 +205,7 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
 
        init.parent_names = parent_names;
 
-       clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
+       clk = ti_clk_register(NULL, &clk_hw->hw, name);
 
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
index 3da33c7..3463579 100644 (file)
@@ -119,19 +119,58 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
        return 0;
 }
 
+/*
+ * Eventually we could standardize to using '_' for clk-*.c files to follow the
+ * TRM naming and leave out the tmp name here.
+ */
+static struct device_node *ti_find_clock_provider(struct device_node *from,
+                                                 const char *name)
+{
+       struct device_node *np;
+       bool found = false;
+       const char *n;
+       char *tmp;
+
+       tmp = kstrdup(name, GFP_KERNEL);
+       if (!tmp)
+               return NULL;
+       strreplace(tmp, '-', '_');
+
+       /* Node named "clock" with "clock-output-names" */
+       for_each_of_allnodes_from(from, np) {
+               if (of_property_read_string_index(np, "clock-output-names",
+                                                 0, &n))
+                       continue;
+
+               if (!strncmp(n, tmp, strlen(tmp))) {
+                       found = true;
+                       break;
+               }
+       }
+       of_node_put(from);
+       kfree(tmp);
+
+       if (found)
+               return np;
+
+       /* Fall back to using old node name base provider name */
+       return of_find_node_by_name(from, name);
+}
+
 /**
  * ti_dt_clocks_register - register DT alias clocks during boot
  * @oclks: list of clocks to register
  *
  * Register alias or non-standard DT clock entries during boot. By
- * default, DT clocks are found based on their node name. If any
+ * default, DT clocks are found based on their clock-output-names
+ * property, or the clock node name for legacy cases. If any
  * additional con-id / dev-id -> clock mapping is required, use this
  * function to list these.
  */
 void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
 {
        struct ti_dt_clk *c;
-       struct device_node *node, *parent;
+       struct device_node *node, *parent, *child;
        struct clk *clk;
        struct of_phandle_args clkspec;
        char buf[64];
@@ -168,13 +207,16 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
                if (num_args && clkctrl_nodes_missing)
                        continue;
 
-               node = of_find_node_by_name(NULL, buf);
+               node = ti_find_clock_provider(NULL, buf);
                if (num_args && compat_mode) {
                        parent = node;
-                       node = of_get_child_by_name(parent, "clock");
-                       if (!node)
-                               node = of_get_child_by_name(parent, "clk");
-                       of_node_put(parent);
+                       child = of_get_child_by_name(parent, "clock");
+                       if (!child)
+                               child = of_get_child_by_name(parent, "clk");
+                       if (child) {
+                               of_node_put(parent);
+                               node = child;
+                       }
                }
 
                clkspec.np = node;
@@ -271,6 +313,8 @@ int ti_clk_get_reg_addr(struct device_node *node, int index,
        for (i = 0; i < CLK_MAX_MEMMAPS; i++) {
                if (clocks_node_ptr[i] == node->parent)
                        break;
+               if (clocks_node_ptr[i] == node->parent->parent)
+                       break;
        }
 
        if (i == CLK_MAX_MEMMAPS) {
@@ -281,8 +325,12 @@ int ti_clk_get_reg_addr(struct device_node *node, int index,
        reg->index = i;
 
        if (of_property_read_u32_index(node, "reg", index, &val)) {
-               pr_err("%pOFn must have reg[%d]!\n", node, index);
-               return -EINVAL;
+               if (of_property_read_u32_index(node->parent, "reg",
+                                              index, &val)) {
+                       pr_err("%pOFn or parent must have reg[%d]!\n",
+                              node, index);
+                       return -EINVAL;
+               }
        }
 
        reg->offset = val;
@@ -399,6 +447,24 @@ static const struct of_device_id simple_clk_match_table[] __initconst = {
        { }
 };
 
+/**
+ * ti_dt_clk_name - init clock name from first output name or node name
+ * @np: device node
+ *
+ * Use the first clock-output-name for the clock name if found. Fall back
+ * to legacy naming based on node name.
+ */
+const char *ti_dt_clk_name(struct device_node *np)
+{
+       const char *name;
+
+       if (!of_property_read_string_index(np, "clock-output-names", 0,
+                                          &name))
+               return name;
+
+       return np->name;
+}
+
 /**
  * ti_clk_add_aliases - setup clock aliases
  *
@@ -415,7 +481,7 @@ void __init ti_clk_add_aliases(void)
                clkspec.np = np;
                clk = of_clk_get_from_provider(&clkspec);
 
-               ti_clk_add_alias(NULL, clk, np->name);
+               ti_clk_add_alias(NULL, clk, ti_dt_clk_name(np));
        }
 }
 
index 864c484..064066e 100644 (file)
@@ -469,14 +469,32 @@ static void __init _clkctrl_add_provider(void *data,
        of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
 }
 
-/* Get clock name based on compatible string for clkctrl */
-static char * __init clkctrl_get_name(struct device_node *np)
+/*
+ * Get clock name based on "clock-output-names" property or the
+ * compatible property for clkctrl.
+ */
+static const char * __init clkctrl_get_name(struct device_node *np)
 {
        struct property *prop;
        const int prefix_len = 11;
        const char *compat;
+       const char *output;
        char *name;
 
+       if (!of_property_read_string_index(np, "clock-output-names", 0,
+                                          &output)) {
+               const char *end;
+               int len;
+
+               len = strlen(output);
+               end = strstr(output, "_clkctrl");
+               if (end)
+                       len -= strlen(end);
+               name = kstrndup(output, len, GFP_KERNEL);
+
+               return name;
+       }
+
        of_property_for_each_string(np, "compatible", prop, compat) {
                if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
                        /* Two letter minimum name length for l3, l4 etc */
@@ -505,7 +523,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
        struct omap_clkctrl_clk *clkctrl_clk = NULL;
        const __be32 *addrp;
        bool legacy_naming;
-       char *clkctrl_name;
+       const char *clkctrl_name;
        u32 addr;
        int ret;
        char *c;
@@ -527,13 +545,8 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
                data = omap5_clkctrl_data;
 #endif
 #ifdef CONFIG_SOC_DRA7XX
-       if (of_machine_is_compatible("ti,dra7")) {
-               if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-                       data = dra7_clkctrl_compat_data;
-               else
-                       data = dra7_clkctrl_data;
-       }
-
+       if (of_machine_is_compatible("ti,dra7"))
+               data = dra7_clkctrl_data;
        if (of_machine_is_compatible("ti,dra72"))
                soc_mask = CLKF_SOC_DRA72;
        if (of_machine_is_compatible("ti,dra74"))
@@ -542,27 +555,15 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
                soc_mask = CLKF_SOC_DRA76;
 #endif
 #ifdef CONFIG_SOC_AM33XX
-       if (of_machine_is_compatible("ti,am33xx")) {
-               if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-                       data = am3_clkctrl_compat_data;
-               else
-                       data = am3_clkctrl_data;
-       }
+       if (of_machine_is_compatible("ti,am33xx"))
+               data = am3_clkctrl_data;
 #endif
 #ifdef CONFIG_SOC_AM43XX
-       if (of_machine_is_compatible("ti,am4372")) {
-               if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-                       data = am4_clkctrl_compat_data;
-               else
-                       data = am4_clkctrl_data;
-       }
+       if (of_machine_is_compatible("ti,am4372"))
+               data = am4_clkctrl_data;
 
-       if (of_machine_is_compatible("ti,am438x")) {
-               if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-                       data = am438x_clkctrl_compat_data;
-               else
-                       data = am438x_clkctrl_data;
-       }
+       if (of_machine_is_compatible("ti,am438x"))
+               data = am438x_clkctrl_data;
 #endif
 #ifdef CONFIG_SOC_TI81XX
        if (of_machine_is_compatible("ti,dm814"))
@@ -603,7 +604,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 
        /*
         * The code below can be removed when all clkctrl nodes use domain
-        * specific compatible proprerty and standard clock node naming
+        * specific compatible property and standard clock node naming
         */
        if (legacy_naming) {
                provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
index f1dd62d..c841d2d 100644 (file)
@@ -201,10 +201,7 @@ extern const struct omap_clkctrl_data am3_clkctrl_data[];
 extern const struct omap_clkctrl_data am3_clkctrl_compat_data[];
 extern struct ti_dt_clk am33xx_compat_clks[];
 extern const struct omap_clkctrl_data am4_clkctrl_data[];
-extern const struct omap_clkctrl_data am4_clkctrl_compat_data[];
-extern struct ti_dt_clk am43xx_compat_clks[];
 extern const struct omap_clkctrl_data am438x_clkctrl_data[];
-extern const struct omap_clkctrl_data am438x_clkctrl_compat_data[];
 extern const struct omap_clkctrl_data dm814_clkctrl_data[];
 extern const struct omap_clkctrl_data dm816_clkctrl_data[];
 
@@ -214,6 +211,7 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
                            const char *con);
 struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
                                    const char *con);
+const char *ti_dt_clk_name(struct device_node *np);
 int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
 void ti_clk_add_aliases(void);
 
index 74831b2..24179c9 100644 (file)
@@ -131,7 +131,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
 {
        struct clk *clk;
        struct clk_hw *clk_hw;
-       const char *clkdm_name = node->name;
+       const char *clkdm_name = ti_dt_clk_name(node);
        int i;
        unsigned int num_clks;
 
index eaa4357..8d60319 100644 (file)
@@ -125,6 +125,7 @@ static void __init _register_composite(void *user,
        struct component_clk *comp;
        int num_parents = 0;
        const char **parent_names = NULL;
+       const char *name;
        int i;
        int ret;
 
@@ -172,7 +173,8 @@ static void __init _register_composite(void *user,
                goto cleanup;
        }
 
-       clk = clk_register_composite(NULL, node->name,
+       name = ti_dt_clk_name(node);
+       clk = clk_register_composite(NULL, name,
                                     parent_names, num_parents,
                                     _get_hw(cclk, CLK_COMPONENT_TYPE_MUX),
                                     &ti_clk_mux_ops,
@@ -182,7 +184,7 @@ static void __init _register_composite(void *user,
                                     &ti_composite_gate_ops, 0);
 
        if (!IS_ERR(clk)) {
-               ret = ti_clk_add_alias(NULL, clk, node->name);
+               ret = ti_clk_add_alias(NULL, clk, name);
                if (ret) {
                        clk_unregister(clk);
                        goto cleanup;
index 28080df..9fbea09 100644 (file)
@@ -320,10 +320,12 @@ static struct clk *_register_divider(struct device_node *node,
        struct clk *clk;
        struct clk_init_data init;
        const char *parent_name;
+       const char *name;
 
        parent_name = of_clk_get_parent_name(node, 0);
 
-       init.name = node->name;
+       name = ti_dt_clk_name(node);
+       init.name = name;
        init.ops = &ti_clk_divider_ops;
        init.flags = flags;
        init.parent_names = (parent_name ? &parent_name : NULL);
@@ -332,7 +334,7 @@ static struct clk *_register_divider(struct device_node *node,
        div->hw.init = &init;
 
        /* register the clock */
-       clk = ti_clk_register(NULL, &div->hw, node->name);
+       clk = ti_clk_register(NULL, &div->hw, name);
 
        if (IS_ERR(clk))
                kfree(div);
index e9f9aee..7c6dc84 100644 (file)
@@ -164,6 +164,7 @@ static void __init _register_dpll(void *user,
        struct clk_hw *hw = user;
        struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
        struct dpll_data *dd = clk_hw->dpll_data;
+       const char *name;
        struct clk *clk;
        const struct clk_init_data *init = hw->init;
 
@@ -193,7 +194,8 @@ static void __init _register_dpll(void *user,
        dd->clk_bypass = __clk_get_hw(clk);
 
        /* register the clock */
-       clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
+       name = ti_dt_clk_name(node);
+       clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
 
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -227,7 +229,7 @@ static void _register_dpll_x2(struct device_node *node,
        struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
-       const char *name = node->name;
+       const char *name = ti_dt_clk_name(node);
        const char *parent_name;
 
        parent_name = of_clk_get_parent_name(node, 0);
@@ -304,7 +306,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
        clk_hw->ops = &clkhwops_omap3_dpll;
        clk_hw->hw.init = init;
 
-       init->name = node->name;
+       init->name = ti_dt_clk_name(node);
        init->ops = ops;
 
        init->num_parents = of_clk_get_parent_count(node);
index 8024c6d..749c6b7 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 /* FAPLL Control Register PLL_CTRL */
 #define FAPLL_MAIN_MULT_N_SHIFT        16
 #define FAPLL_MAIN_DIV_P_SHIFT 8
@@ -542,6 +544,7 @@ static void __init ti_fapll_setup(struct device_node *node)
        struct clk_init_data *init = NULL;
        const char *parent_name[2];
        struct clk *pll_clk;
+       const char *name;
        int i;
 
        fd = kzalloc(sizeof(*fd), GFP_KERNEL);
@@ -559,7 +562,8 @@ static void __init ti_fapll_setup(struct device_node *node)
                goto free;
 
        init->ops = &ti_fapll_ops;
-       init->name = node->name;
+       name = ti_dt_clk_name(node);
+       init->name = name;
 
        init->num_parents = of_clk_get_parent_count(node);
        if (init->num_parents != 2) {
@@ -591,7 +595,7 @@ static void __init ti_fapll_setup(struct device_node *node)
        if (fapll_is_ddr_pll(fd->base))
                fd->bypass_bit_inverted = true;
 
-       fd->name = node->name;
+       fd->name = name;
        fd->hw.init = init;
 
        /* Register the parent PLL */
@@ -638,8 +642,7 @@ static void __init ti_fapll_setup(struct device_node *node)
                                freq = NULL;
                }
                synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
-                                                output_name, node->name,
-                                                pll_clk);
+                                                output_name, name, pll_clk);
                if (IS_ERR(synth_clk))
                        continue;
 
index 7cbe896..8cb00d0 100644 (file)
@@ -36,7 +36,7 @@
 static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
 {
        struct clk *clk;
-       const char *clk_name = node->name;
+       const char *clk_name = ti_dt_clk_name(node);
        const char *parent_name;
        u32 div, mult;
        u32 flags = 0;
index b1d0fdb..0033de9 100644 (file)
@@ -138,6 +138,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
        struct clk *clk;
        const char *parent_name;
        struct clk_omap_reg reg;
+       const char *name;
        u8 enable_bit = 0;
        u32 val;
        u32 flags = 0;
@@ -164,7 +165,8 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
        if (of_property_read_bool(node, "ti,set-bit-to-disable"))
                clk_gate_flags |= INVERT_ENABLE;
 
-       clk = _register_gate(NULL, node->name, parent_name, flags, &reg,
+       name = ti_dt_clk_name(node);
+       clk = _register_gate(NULL, name, parent_name, flags, &reg,
                             enable_bit, clk_gate_flags, ops, hw_ops);
 
        if (!IS_ERR(clk))
index 83e3442..dd2b455 100644 (file)
@@ -72,6 +72,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
        const char *parent_name;
        struct clk_omap_reg reg;
        u8 enable_bit = 0;
+       const char *name;
        u32 val;
 
        if (ti_clk_get_reg_addr(node, 0, &reg))
@@ -86,7 +87,8 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
                return;
        }
 
-       clk = _register_interface(NULL, node->name, parent_name, &reg,
+       name = ti_dt_clk_name(node);
+       clk = _register_interface(NULL, name, parent_name, &reg,
                                  enable_bit, ops);
 
        if (!IS_ERR(clk))
index 0069e7c..15de513 100644 (file)
@@ -176,6 +176,7 @@ static void of_mux_clk_setup(struct device_node *node)
        struct clk_omap_reg reg;
        unsigned int num_parents;
        const char **parent_names;
+       const char *name;
        u8 clk_mux_flags = 0;
        u32 mask = 0;
        u32 shift = 0;
@@ -213,7 +214,8 @@ static void of_mux_clk_setup(struct device_node *node)
 
        mask = (1 << fls(mask)) - 1;
 
-       clk = _register_mux(NULL, node->name, parent_names, num_parents,
+       name = ti_dt_clk_name(node);
+       clk = _register_mux(NULL, name, parent_names, num_parents,
                            flags, &reg, shift, mask, latch, clk_mux_flags,
                            NULL);
 
index 5319cd3..3bc55ab 100644 (file)
@@ -24,6 +24,7 @@ struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev,
 
        init.name = name;
        init.ops = &clk_fixed_rate_ops;
+       init.flags = 0;
        init.parent_names = NULL;
        init.num_parents = 0;
 
index c2b2f41..6c753b2 100644 (file)
@@ -176,7 +176,7 @@ static const struct visconti_clk_gate_table clk_gate_tables[] = {
        { TMPV770X_CLK_WRCK, "wrck",
                clks_parent_data, ARRAY_SIZE(clks_parent_data),
                0, 0x68, 0x168, 9, 32,
-               -1, }, /* No reset */
+               NO_RESET, },
        { TMPV770X_CLK_PICKMON, "pickmon",
                clks_parent_data, ARRAY_SIZE(clks_parent_data),
                0, 0x10, 0x110, 8, 4,
index 56a8a4f..d0b193b 100644 (file)
@@ -147,7 +147,7 @@ int visconti_clk_register_gates(struct visconti_clk_provider *ctx,
                if (!dev_name)
                        return -ENOMEM;
 
-               if (clks[i].rs_id >= 0) {
+               if (clks[i].rs_id != NO_RESET) {
                        rson_offset = reset[clks[i].rs_id].rson_offset;
                        rsoff_offset = reset[clks[i].rs_id].rsoff_offset;
                        rs_idx = reset[clks[i].rs_id].rs_idx;
index 09ed82f..8756a1e 100644 (file)
@@ -73,4 +73,7 @@ int visconti_clk_register_gates(struct visconti_clk_provider *data,
                                 int num_gate,
                                 const struct visconti_reset_data *reset,
                                 spinlock_t *lock);
+
+#define NO_RESET 0xFF
+
 #endif /* _VISCONTI_CLKC_H_ */
index 204b83d..7bdeaff 100644 (file)
@@ -349,19 +349,20 @@ static void __init zynq_clk_setup(struct device_node *np)
        /* Peripheral clocks */
        for (i = fclk0; i <= fclk3; i++) {
                int enable = !!(fclk_enable & BIT(i - fclk0));
+
                zynq_clk_register_fclk(i, clk_output_name[i],
                                SLCR_FPGA0_CLK_CTRL + 0x10 * (i - fclk0),
                                periph_parents, enable);
        }
 
-       zynq_clk_register_periph_clk(lqspi, 0, clk_output_name[lqspi], NULL,
-                       SLCR_LQSPI_CLK_CTRL, periph_parents, 0);
+       zynq_clk_register_periph_clk(lqspi, clk_max, clk_output_name[lqspi], NULL,
+                                    SLCR_LQSPI_CLK_CTRL, periph_parents, 0);
 
-       zynq_clk_register_periph_clk(smc, 0, clk_output_name[smc], NULL,
-                       SLCR_SMC_CLK_CTRL, periph_parents, 0);
+       zynq_clk_register_periph_clk(smc, clk_max, clk_output_name[smc], NULL,
+                                    SLCR_SMC_CLK_CTRL, periph_parents, 0);
 
-       zynq_clk_register_periph_clk(pcap, 0, clk_output_name[pcap], NULL,
-                       SLCR_PCAP_CLK_CTRL, periph_parents, 0);
+       zynq_clk_register_periph_clk(pcap, clk_max, clk_output_name[pcap], NULL,
+                                    SLCR_PCAP_CLK_CTRL, periph_parents, 0);
 
        zynq_clk_register_periph_clk(sdio0, sdio1, clk_output_name[sdio0],
                        clk_output_name[sdio1], SLCR_SDIO_CLK_CTRL,
index 565ed67..b89e557 100644 (file)
@@ -41,8 +41,8 @@ static int zynqmp_clk_gate_enable(struct clk_hw *hw)
        ret = zynqmp_pm_clock_enable(clk_id);
 
        if (ret)
-               pr_warn_once("%s() clock enabled failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() clock enable failed for %s (id %d), ret = %d\n",
+                        __func__, clk_name, clk_id, ret);
 
        return ret;
 }
@@ -61,8 +61,8 @@ static void zynqmp_clk_gate_disable(struct clk_hw *hw)
        ret = zynqmp_pm_clock_disable(clk_id);
 
        if (ret)
-               pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() clock disable failed for %s (id %d), ret = %d\n",
+                        __func__, clk_name, clk_id, ret);
 }
 
 /**
@@ -80,8 +80,8 @@ static int zynqmp_clk_gate_is_enabled(struct clk_hw *hw)
 
        ret = zynqmp_pm_clock_getstate(clk_id, &state);
        if (ret) {
-               pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() clock get state failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
                return -EIO;
        }
 
index 17afce5..6035933 100644 (file)
@@ -51,8 +51,8 @@ static u8 zynqmp_clk_mux_get_parent(struct clk_hw *hw)
        ret = zynqmp_pm_clock_getparent(clk_id, &val);
 
        if (ret) {
-               pr_warn_once("%s() getparent failed for clock: %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() getparent failed for clock: %s, ret = %d\n",
+                        __func__, clk_name, ret);
                /*
                 * clk_core_get_parent_by_index() takes num_parents as incorrect
                 * index which is exactly what I want to return here
@@ -80,8 +80,8 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
        ret = zynqmp_pm_clock_setparent(clk_id, index);
 
        if (ret)
-               pr_warn_once("%s() set parent failed for clock: %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() set parent failed for clock: %s, ret = %d\n",
+                        __func__, clk_name, ret);
 
        return ret;
 }
index cb49281..422ea79 100644 (file)
@@ -89,8 +89,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
        ret = zynqmp_pm_clock_getdivider(clk_id, &div);
 
        if (ret)
-               pr_warn_once("%s() get divider failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() get divider failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
 
        if (div_type == TYPE_DIV1)
                value = div & 0xFFFF;
@@ -177,8 +177,8 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
                ret = zynqmp_pm_clock_getdivider(clk_id, &bestdiv);
 
                if (ret)
-                       pr_warn_once("%s() get divider failed for %s, ret = %d\n",
-                                    __func__, clk_name, ret);
+                       pr_debug("%s() get divider failed for %s, ret = %d\n",
+                                __func__, clk_name, ret);
                if (div_type == TYPE_DIV1)
                        bestdiv = bestdiv & 0xFFFF;
                else
@@ -244,8 +244,8 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        ret = zynqmp_pm_clock_setdivider(clk_id, div);
 
        if (ret)
-               pr_warn_once("%s() set divider failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() set divider failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
 
        return ret;
 }
index 036e4ff..91a6b4c 100644 (file)
@@ -56,8 +56,8 @@ static inline enum pll_mode zynqmp_pll_get_mode(struct clk_hw *hw)
 
        ret = zynqmp_pm_get_pll_frac_mode(clk_id, ret_payload);
        if (ret) {
-               pr_warn_once("%s() PLL get frac mode failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() PLL get frac mode failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
                return PLL_MODE_ERROR;
        }
 
@@ -84,8 +84,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
 
        ret = zynqmp_pm_set_pll_frac_mode(clk_id, mode);
        if (ret)
-               pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() PLL set frac mode failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
        else
                clk->set_pll_mode = true;
 }
@@ -145,8 +145,8 @@ static unsigned long zynqmp_pll_recalc_rate(struct clk_hw *hw,
 
        ret = zynqmp_pm_clock_getdivider(clk_id, &fbdiv);
        if (ret) {
-               pr_warn_once("%s() get divider failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() get divider failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
                return 0ul;
        }
 
@@ -200,8 +200,8 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                        WARN(1, "More than allowed devices are using the %s, which is forbidden\n",
                             clk_name);
                else if (ret)
-                       pr_warn_once("%s() set divider failed for %s, ret = %d\n",
-                                    __func__, clk_name, ret);
+                       pr_debug("%s() set divider failed for %s, ret = %d\n",
+                                __func__, clk_name, ret);
                zynqmp_pm_set_pll_frac_data(clk_id, f);
 
                return rate + frac;
@@ -211,8 +211,8 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
        fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
        ret = zynqmp_pm_clock_setdivider(clk_id, fbdiv);
        if (ret)
-               pr_warn_once("%s() set divider failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() set divider failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
 
        return parent_rate * fbdiv;
 }
@@ -233,8 +233,8 @@ static int zynqmp_pll_is_enabled(struct clk_hw *hw)
 
        ret = zynqmp_pm_clock_getstate(clk_id, &state);
        if (ret) {
-               pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() clock get state failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
                return -EIO;
        }
 
@@ -265,8 +265,8 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
 
        ret = zynqmp_pm_clock_enable(clk_id);
        if (ret)
-               pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() clock enable failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
 
        return ret;
 }
@@ -287,8 +287,8 @@ static void zynqmp_pll_disable(struct clk_hw *hw)
 
        ret = zynqmp_pm_clock_disable(clk_id);
        if (ret)
-               pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
-                            __func__, clk_name, ret);
+               pr_debug("%s() clock disable failed for %s, ret = %d\n",
+                        __func__, clk_name, ret);
 }
 
 static const struct clk_ops zynqmp_pll_ops = {
index c0aeedd..ff71dd6 100644 (file)
@@ -47,6 +47,10 @@ config CPU_IDLE_GOV_HALTPOLL
 config DT_IDLE_STATES
        bool
 
+config DT_IDLE_GENPD
+       depends on PM_GENERIC_DOMAINS_OF
+       bool
+
 menu "ARM CPU Idle Drivers"
 depends on ARM || ARM64
 source "drivers/cpuidle/Kconfig.arm"
@@ -62,6 +66,11 @@ depends on PPC
 source "drivers/cpuidle/Kconfig.powerpc"
 endmenu
 
+menu "RISC-V CPU Idle Drivers"
+depends on RISCV
+source "drivers/cpuidle/Kconfig.riscv"
+endmenu
+
 config HALTPOLL_CPUIDLE
        tristate "Halt poll cpuidle driver"
        depends on X86 && KVM_GUEST
index 15d6c46..be7f512 100644 (file)
@@ -27,6 +27,7 @@ config ARM_PSCI_CPUIDLE_DOMAIN
        bool "PSCI CPU idle Domain"
        depends on ARM_PSCI_CPUIDLE
        depends on PM_GENERIC_DOMAINS_OF
+       select DT_IDLE_GENPD
        default y
        help
          Select this to enable the PSCI based CPUidle driver to use PM domains,
diff --git a/drivers/cpuidle/Kconfig.riscv b/drivers/cpuidle/Kconfig.riscv
new file mode 100644 (file)
index 0000000..78518c2
--- /dev/null
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RISC-V CPU Idle drivers
+#
+
+config RISCV_SBI_CPUIDLE
+       bool "RISC-V SBI CPU idle Driver"
+       depends on RISCV_SBI
+       select DT_IDLE_STATES
+       select CPU_IDLE_MULTIPLE_DRIVERS
+       select DT_IDLE_GENPD if PM_GENERIC_DOMAINS_OF
+       help
+         Select this option to enable RISC-V SBI firmware based CPU idle
+         driver for RISC-V systems. This drivers also supports hierarchical
+         DT based layout of the idle state.
index 26bbc5e..d103342 100644 (file)
@@ -6,6 +6,7 @@
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 obj-$(CONFIG_DT_IDLE_STATES)             += dt_idle_states.o
+obj-$(CONFIG_DT_IDLE_GENPD)              += dt_idle_genpd.o
 obj-$(CONFIG_ARCH_HAS_CPU_RELAX)         += poll_state.o
 obj-$(CONFIG_HALTPOLL_CPUIDLE)           += cpuidle-haltpoll.o
 
@@ -34,3 +35,7 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE)                += cpuidle-cps.o
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)          += cpuidle-pseries.o
 obj-$(CONFIG_POWERNV_CPUIDLE)          += cpuidle-powernv.o
+
+###############################################################################
+# RISC-V drivers
+obj-$(CONFIG_RISCV_SBI_CPUIDLE)                += cpuidle-riscv-sbi.o
index ff2c3f8..755bbdf 100644 (file)
@@ -47,73 +47,14 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
        return 0;
 }
 
-static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
-                                    int state_count)
-{
-       int i, ret;
-       u32 psci_state, *psci_state_buf;
-
-       for (i = 0; i < state_count; i++) {
-               ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
-                                       &psci_state);
-               if (ret)
-                       goto free_state;
-
-               psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
-               if (!psci_state_buf) {
-                       ret = -ENOMEM;
-                       goto free_state;
-               }
-               *psci_state_buf = psci_state;
-               states[i].data = psci_state_buf;
-       }
-
-       return 0;
-
-free_state:
-       i--;
-       for (; i >= 0; i--)
-               kfree(states[i].data);
-       return ret;
-}
-
-static int psci_pd_parse_states(struct device_node *np,
-                       struct genpd_power_state **states, int *state_count)
-{
-       int ret;
-
-       /* Parse the domain idle states. */
-       ret = of_genpd_parse_idle_states(np, states, state_count);
-       if (ret)
-               return ret;
-
-       /* Fill out the PSCI specifics for each found state. */
-       ret = psci_pd_parse_state_nodes(*states, *state_count);
-       if (ret)
-               kfree(*states);
-
-       return ret;
-}
-
-static void psci_pd_free_states(struct genpd_power_state *states,
-                               unsigned int state_count)
-{
-       int i;
-
-       for (i = 0; i < state_count; i++)
-               kfree(states[i].data);
-       kfree(states);
-}
-
 static int psci_pd_init(struct device_node *np, bool use_osi)
 {
        struct generic_pm_domain *pd;
        struct psci_pd_provider *pd_provider;
        struct dev_power_governor *pd_gov;
-       struct genpd_power_state *states = NULL;
        int ret = -ENOMEM, state_count = 0;
 
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       pd = dt_idle_pd_alloc(np, psci_dt_parse_state_node);
        if (!pd)
                goto out;
 
@@ -121,22 +62,6 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
        if (!pd_provider)
                goto free_pd;
 
-       pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
-       if (!pd->name)
-               goto free_pd_prov;
-
-       /*
-        * Parse the domain idle states and let genpd manage the state selection
-        * for those being compatible with "domain-idle-state".
-        */
-       ret = psci_pd_parse_states(np, &states, &state_count);
-       if (ret)
-               goto free_name;
-
-       pd->free_states = psci_pd_free_states;
-       pd->name = kbasename(pd->name);
-       pd->states = states;
-       pd->state_count = state_count;
        pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
 
        /* Allow power off when OSI has been successfully enabled. */
@@ -149,10 +74,8 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
        pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
 
        ret = pm_genpd_init(pd, pd_gov, false);
-       if (ret) {
-               psci_pd_free_states(states, state_count);
-               goto free_name;
-       }
+       if (ret)
+               goto free_pd_prov;
 
        ret = of_genpd_add_provider_simple(np, pd);
        if (ret)
@@ -166,12 +89,10 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
 
 remove_pd:
        pm_genpd_remove(pd);
-free_name:
-       kfree(pd->name);
 free_pd_prov:
        kfree(pd_provider);
 free_pd:
-       kfree(pd);
+       dt_idle_pd_free(pd);
 out:
        pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
        return ret;
@@ -195,30 +116,6 @@ static void psci_pd_remove(void)
        }
 }
 
-static int psci_pd_init_topology(struct device_node *np)
-{
-       struct device_node *node;
-       struct of_phandle_args child, parent;
-       int ret;
-
-       for_each_child_of_node(np, node) {
-               if (of_parse_phandle_with_args(node, "power-domains",
-                                       "#power-domain-cells", 0, &parent))
-                       continue;
-
-               child.np = node;
-               child.args_count = 0;
-               ret = of_genpd_add_subdomain(&parent, &child);
-               of_node_put(parent.np);
-               if (ret) {
-                       of_node_put(node);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
 static bool psci_pd_try_set_osi_mode(void)
 {
        int ret;
@@ -282,7 +179,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
                goto no_pd;
 
        /* Link genpd masters/subdomains to model the CPU topology. */
-       ret = psci_pd_init_topology(np);
+       ret = dt_idle_pd_init_topology(np);
        if (ret)
                goto remove_pd;
 
@@ -314,28 +211,3 @@ static int __init psci_idle_init_domains(void)
        return platform_driver_register(&psci_cpuidle_domain_driver);
 }
 subsys_initcall(psci_idle_init_domains);
-
-struct device *psci_dt_attach_cpu(int cpu)
-{
-       struct device *dev;
-
-       dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
-       if (IS_ERR_OR_NULL(dev))
-               return dev;
-
-       pm_runtime_irq_safe(dev);
-       if (cpu_online(cpu))
-               pm_runtime_get_sync(dev);
-
-       dev_pm_syscore_device(dev, true);
-
-       return dev;
-}
-
-void psci_dt_detach_cpu(struct device *dev)
-{
-       if (IS_ERR_OR_NULL(dev))
-               return;
-
-       dev_pm_domain_detach(dev, false);
-}
index d8e925e..4e13264 100644 (file)
@@ -10,8 +10,19 @@ void psci_set_domain_state(u32 state);
 int psci_dt_parse_state_node(struct device_node *np, u32 *state);
 
 #ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
-struct device *psci_dt_attach_cpu(int cpu);
-void psci_dt_detach_cpu(struct device *dev);
+
+#include "dt_idle_genpd.h"
+
+static inline struct device *psci_dt_attach_cpu(int cpu)
+{
+       return dt_idle_attach_cpu(cpu, "psci");
+}
+
+static inline void psci_dt_detach_cpu(struct device *dev)
+{
+       dt_idle_detach_cpu(dev);
+}
+
 #else
 static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
 static inline void psci_dt_detach_cpu(struct device *dev) { }
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
new file mode 100644 (file)
index 0000000..b459eda
--- /dev/null
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RISC-V SBI CPU idle driver.
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu_cooling.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#include "dt_idle_states.h"
+#include "dt_idle_genpd.h"
+
+struct sbi_cpuidle_data {
+       u32 *states;
+       struct device *dev;
+};
+
+struct sbi_domain_state {
+       bool available;
+       u32 state;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
+static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
+static bool sbi_cpuidle_use_osi;
+static bool sbi_cpuidle_use_cpuhp;
+static bool sbi_cpuidle_pd_allow_domain_state;
+
+static inline void sbi_set_domain_state(u32 state)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       data->available = true;
+       data->state = state;
+}
+
+static inline u32 sbi_get_domain_state(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       return data->state;
+}
+
+static inline void sbi_clear_domain_state(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       data->available = false;
+}
+
+static inline bool sbi_is_domain_state_available(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       return data->available;
+}
+
+static int sbi_suspend_finisher(unsigned long suspend_type,
+                               unsigned long resume_addr,
+                               unsigned long opaque)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
+                       suspend_type, resume_addr, opaque, 0, 0, 0);
+
+       return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
+}
+
+static int sbi_suspend(u32 state)
+{
+       if (state & SBI_HSM_SUSP_NON_RET_BIT)
+               return cpu_suspend(state, sbi_suspend_finisher);
+       else
+               return sbi_suspend_finisher(state, 0, 0);
+}
+
+static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+                                  struct cpuidle_driver *drv, int idx)
+{
+       u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
+
+       return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
+}
+
+static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+                                         struct cpuidle_driver *drv, int idx,
+                                         bool s2idle)
+{
+       struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
+       u32 *states = data->states;
+       struct device *pd_dev = data->dev;
+       u32 state;
+       int ret;
+
+       ret = cpu_pm_enter();
+       if (ret)
+               return -1;
+
+       /* Do runtime PM to manage a hierarchical CPU toplogy. */
+       rcu_irq_enter_irqson();
+       if (s2idle)
+               dev_pm_genpd_suspend(pd_dev);
+       else
+               pm_runtime_put_sync_suspend(pd_dev);
+       rcu_irq_exit_irqson();
+
+       if (sbi_is_domain_state_available())
+               state = sbi_get_domain_state();
+       else
+               state = states[idx];
+
+       ret = sbi_suspend(state) ? -1 : idx;
+
+       rcu_irq_enter_irqson();
+       if (s2idle)
+               dev_pm_genpd_resume(pd_dev);
+       else
+               pm_runtime_get_sync(pd_dev);
+       rcu_irq_exit_irqson();
+
+       cpu_pm_exit();
+
+       /* Clear the domain state to start fresh when back from idle. */
+       sbi_clear_domain_state();
+       return ret;
+}
+
+static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+                                      struct cpuidle_driver *drv, int idx)
+{
+       return __sbi_enter_domain_idle_state(dev, drv, idx, false);
+}
+
+static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
+                                             struct cpuidle_driver *drv,
+                                             int idx)
+{
+       return __sbi_enter_domain_idle_state(dev, drv, idx, true);
+}
+
+static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+       if (pd_dev)
+               pm_runtime_get_sync(pd_dev);
+
+       return 0;
+}
+
+static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+       if (pd_dev) {
+               pm_runtime_put_sync(pd_dev);
+               /* Clear domain state to start fresh at next online. */
+               sbi_clear_domain_state();
+       }
+
+       return 0;
+}
+
+static void sbi_idle_init_cpuhp(void)
+{
+       int err;
+
+       if (!sbi_cpuidle_use_cpuhp)
+               return;
+
+       err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+                                       "cpuidle/sbi:online",
+                                       sbi_cpuidle_cpuhp_up,
+                                       sbi_cpuidle_cpuhp_down);
+       if (err)
+               pr_warn("Failed %d while setup cpuhp state\n", err);
+}
+
+static const struct of_device_id sbi_cpuidle_state_match[] = {
+       { .compatible = "riscv,idle-state",
+         .data = sbi_cpuidle_enter_state },
+       { },
+};
+
+static bool sbi_suspend_state_is_valid(u32 state)
+{
+       if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
+           state < SBI_HSM_SUSPEND_RET_PLATFORM)
+               return false;
+       if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
+           state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
+               return false;
+       return true;
+}
+
+static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+       int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
+
+       if (err) {
+               pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
+               return err;
+       }
+
+       if (!sbi_suspend_state_is_valid(*state)) {
+               pr_warn("Invalid SBI suspend state %#x\n", *state);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
+                                    struct sbi_cpuidle_data *data,
+                                    unsigned int state_count, int cpu)
+{
+       /* Currently limit the hierarchical topology to be used in OSI mode. */
+       if (!sbi_cpuidle_use_osi)
+               return 0;
+
+       data->dev = dt_idle_attach_cpu(cpu, "sbi");
+       if (IS_ERR_OR_NULL(data->dev))
+               return PTR_ERR_OR_ZERO(data->dev);
+
+       /*
+        * Using the deepest state for the CPU to trigger a potential selection
+        * of a shared state for the domain, assumes the domain states are all
+        * deeper states.
+        */
+       drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
+       drv->states[state_count - 1].enter_s2idle =
+                                       sbi_enter_s2idle_domain_idle_state;
+       sbi_cpuidle_use_cpuhp = true;
+
+       return 0;
+}
+
+static int sbi_cpuidle_dt_init_states(struct device *dev,
+                                       struct cpuidle_driver *drv,
+                                       unsigned int cpu,
+                                       unsigned int state_count)
+{
+       struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+       struct device_node *state_node;
+       struct device_node *cpu_node;
+       u32 *states;
+       int i, ret;
+
+       cpu_node = of_cpu_device_node_get(cpu);
+       if (!cpu_node)
+               return -ENODEV;
+
+       states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
+       if (!states) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       /* Parse SBI specific details from state DT nodes */
+       for (i = 1; i < state_count; i++) {
+               state_node = of_get_cpu_state_node(cpu_node, i - 1);
+               if (!state_node)
+                       break;
+
+               ret = sbi_dt_parse_state_node(state_node, &states[i]);
+               of_node_put(state_node);
+
+               if (ret)
+                       return ret;
+
+               pr_debug("sbi-state %#x index %d\n", states[i], i);
+       }
+       if (i != state_count) {
+               ret = -ENODEV;
+               goto fail;
+       }
+
+       /* Initialize optional data, used for the hierarchical topology. */
+       ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
+       if (ret < 0)
+               return ret;
+
+       /* Store states in the per-cpu struct. */
+       data->states = states;
+
+fail:
+       of_node_put(cpu_node);
+
+       return ret;
+}
+
+static void sbi_cpuidle_deinit_cpu(int cpu)
+{
+       struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+
+       dt_idle_detach_cpu(data->dev);
+       sbi_cpuidle_use_cpuhp = false;
+}
+
+static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
+{
+       struct cpuidle_driver *drv;
+       unsigned int state_count = 0;
+       int ret = 0;
+
+       drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+       if (!drv)
+               return -ENOMEM;
+
+       drv->name = "sbi_cpuidle";
+       drv->owner = THIS_MODULE;
+       drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+       /* RISC-V architectural WFI to be represented as state index 0. */
+       drv->states[0].enter = sbi_cpuidle_enter_state;
+       drv->states[0].exit_latency = 1;
+       drv->states[0].target_residency = 1;
+       drv->states[0].power_usage = UINT_MAX;
+       strcpy(drv->states[0].name, "WFI");
+       strcpy(drv->states[0].desc, "RISC-V WFI");
+
+       /*
+        * If no DT idle states are detected (ret == 0) let the driver
+        * initialization fail accordingly since there is no reason to
+        * initialize the idle driver if only wfi is supported, the
+        * default archictectural back-end already executes wfi
+        * on idle entry.
+        */
+       ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
+       if (ret <= 0) {
+               pr_debug("HART%ld: failed to parse DT idle states\n",
+                        cpuid_to_hartid_map(cpu));
+               return ret ? : -ENODEV;
+       }
+       state_count = ret + 1; /* Include WFI state as well */
+
+       /* Initialize idle states from DT. */
+       ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
+       if (ret) {
+               pr_err("HART%ld: failed to init idle states\n",
+                      cpuid_to_hartid_map(cpu));
+               return ret;
+       }
+
+       ret = cpuidle_register(drv, NULL);
+       if (ret)
+               goto deinit;
+
+       cpuidle_cooling_register(drv);
+
+       return 0;
+deinit:
+       sbi_cpuidle_deinit_cpu(cpu);
+       return ret;
+}
+
+static void sbi_cpuidle_domain_sync_state(struct device *dev)
+{
+       /*
+        * All devices have now been attached/probed to the PM domain
+        * topology, hence it's fine to allow domain states to be picked.
+        */
+       sbi_cpuidle_pd_allow_domain_state = true;
+}
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
+{
+       struct genpd_power_state *state = &pd->states[pd->state_idx];
+       u32 *pd_state;
+
+       if (!state->data)
+               return 0;
+
+       if (!sbi_cpuidle_pd_allow_domain_state)
+               return -EBUSY;
+
+       /* OSI mode is enabled, set the corresponding domain state. */
+       pd_state = state->data;
+       sbi_set_domain_state(*pd_state);
+
+       return 0;
+}
+
+struct sbi_pd_provider {
+       struct list_head link;
+       struct device_node *node;
+};
+
+static LIST_HEAD(sbi_pd_providers);
+
+static int sbi_pd_init(struct device_node *np)
+{
+       struct generic_pm_domain *pd;
+       struct sbi_pd_provider *pd_provider;
+       struct dev_power_governor *pd_gov;
+       int ret = -ENOMEM, state_count = 0;
+
+       pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
+       if (!pd)
+               goto out;
+
+       pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+       if (!pd_provider)
+               goto free_pd;
+
+       pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+       /* Allow power off when OSI is available. */
+       if (sbi_cpuidle_use_osi)
+               pd->power_off = sbi_cpuidle_pd_power_off;
+       else
+               pd->flags |= GENPD_FLAG_ALWAYS_ON;
+
+       /* Use governor for CPU PM domains if it has some states to manage. */
+       pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
+
+       ret = pm_genpd_init(pd, pd_gov, false);
+       if (ret)
+               goto free_pd_prov;
+
+       ret = of_genpd_add_provider_simple(np, pd);
+       if (ret)
+               goto remove_pd;
+
+       pd_provider->node = of_node_get(np);
+       list_add(&pd_provider->link, &sbi_pd_providers);
+
+       pr_debug("init PM domain %s\n", pd->name);
+       return 0;
+
+remove_pd:
+       pm_genpd_remove(pd);
+free_pd_prov:
+       kfree(pd_provider);
+free_pd:
+       dt_idle_pd_free(pd);
+out:
+       pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+       return ret;
+}
+
+static void sbi_pd_remove(void)
+{
+       struct sbi_pd_provider *pd_provider, *it;
+       struct generic_pm_domain *genpd;
+
+       list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
+               of_genpd_del_provider(pd_provider->node);
+
+               genpd = of_genpd_remove_last(pd_provider->node);
+               if (!IS_ERR(genpd))
+                       kfree(genpd);
+
+               of_node_put(pd_provider->node);
+               list_del(&pd_provider->link);
+               kfree(pd_provider);
+       }
+}
+
+static int sbi_genpd_probe(struct device_node *np)
+{
+       struct device_node *node;
+       int ret = 0, pd_count = 0;
+
+       if (!np)
+               return -ENODEV;
+
+       /*
+        * Parse child nodes for the "#power-domain-cells" property and
+        * initialize a genpd/genpd-of-provider pair when it's found.
+        */
+       for_each_child_of_node(np, node) {
+               if (!of_find_property(node, "#power-domain-cells", NULL))
+                       continue;
+
+               ret = sbi_pd_init(node);
+               if (ret)
+                       goto put_node;
+
+               pd_count++;
+       }
+
+       /* Bail out if not using the hierarchical CPU topology. */
+       if (!pd_count)
+               goto no_pd;
+
+       /* Link genpd masters/subdomains to model the CPU topology. */
+       ret = dt_idle_pd_init_topology(np);
+       if (ret)
+               goto remove_pd;
+
+       return 0;
+
+put_node:
+       of_node_put(node);
+remove_pd:
+       sbi_pd_remove();
+       pr_err("failed to create CPU PM domains ret=%d\n", ret);
+no_pd:
+       return ret;
+}
+
+#else
+
+static inline int sbi_genpd_probe(struct device_node *np)
+{
+       return 0;
+}
+
+#endif
+
+static int sbi_cpuidle_probe(struct platform_device *pdev)
+{
+       int cpu, ret;
+       struct cpuidle_driver *drv;
+       struct cpuidle_device *dev;
+       struct device_node *np, *pds_node;
+
+       /* Detect OSI support based on CPU DT nodes */
+       sbi_cpuidle_use_osi = true;
+       for_each_possible_cpu(cpu) {
+               np = of_cpu_device_node_get(cpu);
+               if (np &&
+                   of_find_property(np, "power-domains", NULL) &&
+                   of_find_property(np, "power-domain-names", NULL)) {
+                       continue;
+               } else {
+                       sbi_cpuidle_use_osi = false;
+                       break;
+               }
+       }
+
+       /* Populate generic power domains from DT nodes */
+       pds_node = of_find_node_by_path("/cpus/power-domains");
+       if (pds_node) {
+               ret = sbi_genpd_probe(pds_node);
+               of_node_put(pds_node);
+               if (ret)
+                       return ret;
+       }
+
+       /* Initialize CPU idle driver for each CPU */
+       for_each_possible_cpu(cpu) {
+               ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
+               if (ret) {
+                       pr_debug("HART%ld: idle driver init failed\n",
+                                cpuid_to_hartid_map(cpu));
+                       goto out_fail;
+               }
+       }
+
+       /* Setup CPU hotplut notifiers */
+       sbi_idle_init_cpuhp();
+
+       pr_info("idle driver registered for all CPUs\n");
+
+       return 0;
+
+out_fail:
+       while (--cpu >= 0) {
+               dev = per_cpu(cpuidle_devices, cpu);
+               drv = cpuidle_get_cpu_driver(dev);
+               cpuidle_unregister(drv);
+               sbi_cpuidle_deinit_cpu(cpu);
+       }
+
+       return ret;
+}
+
+static struct platform_driver sbi_cpuidle_driver = {
+       .probe = sbi_cpuidle_probe,
+       .driver = {
+               .name = "sbi-cpuidle",
+               .sync_state = sbi_cpuidle_domain_sync_state,
+       },
+};
+
+static int __init sbi_cpuidle_init(void)
+{
+       int ret;
+       struct platform_device *pdev;
+
+       /*
+        * The SBI HSM suspend function is only available when:
+        * 1) SBI version is 0.3 or higher
+        * 2) SBI HSM extension is available
+        */
+       if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+           sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+               pr_info("HSM suspend not available\n");
+               return 0;
+       }
+
+       ret = platform_driver_register(&sbi_cpuidle_driver);
+       if (ret)
+               return ret;
+
+       pdev = platform_device_register_simple("sbi-cpuidle",
+                                               -1, NULL, 0);
+       if (IS_ERR(pdev)) {
+               platform_driver_unregister(&sbi_cpuidle_driver);
+               return PTR_ERR(pdev);
+       }
+
+       return 0;
+}
+device_initcall(sbi_cpuidle_init);
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
new file mode 100644 (file)
index 0000000..b371655
--- /dev/null
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PM domains for CPUs via genpd.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "dt-idle-genpd: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "dt_idle_genpd.h"
+
+static int pd_parse_state_nodes(
+                       int (*parse_state)(struct device_node *, u32 *),
+                       struct genpd_power_state *states, int state_count)
+{
+       int i, ret;
+       u32 state, *state_buf;
+
+       for (i = 0; i < state_count; i++) {
+               ret = parse_state(to_of_node(states[i].fwnode), &state);
+               if (ret)
+                       goto free_state;
+
+               state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+               if (!state_buf) {
+                       ret = -ENOMEM;
+                       goto free_state;
+               }
+               *state_buf = state;
+               states[i].data = state_buf;
+       }
+
+       return 0;
+
+free_state:
+       i--;
+       for (; i >= 0; i--)
+               kfree(states[i].data);
+       return ret;
+}
+
+static int pd_parse_states(struct device_node *np,
+                          int (*parse_state)(struct device_node *, u32 *),
+                          struct genpd_power_state **states,
+                          int *state_count)
+{
+       int ret;
+
+       /* Parse the domain idle states. */
+       ret = of_genpd_parse_idle_states(np, states, state_count);
+       if (ret)
+               return ret;
+
+       /* Fill out the dt specifics for each found state. */
+       ret = pd_parse_state_nodes(parse_state, *states, *state_count);
+       if (ret)
+               kfree(*states);
+
+       return ret;
+}
+
+static void pd_free_states(struct genpd_power_state *states,
+                           unsigned int state_count)
+{
+       int i;
+
+       for (i = 0; i < state_count; i++)
+               kfree(states[i].data);
+       kfree(states);
+}
+
+void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+       pd_free_states(pd->states, pd->state_count);
+       kfree(pd->name);
+       kfree(pd);
+}
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *))
+{
+       struct generic_pm_domain *pd;
+       struct genpd_power_state *states = NULL;
+       int ret, state_count = 0;
+
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               goto out;
+
+       pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+       if (!pd->name)
+               goto free_pd;
+
+       /*
+        * Parse the domain idle states and let genpd manage the state selection
+        * for those being compatible with "domain-idle-state".
+        */
+       ret = pd_parse_states(np, parse_state, &states, &state_count);
+       if (ret)
+               goto free_name;
+
+       pd->free_states = pd_free_states;
+       pd->name = kbasename(pd->name);
+       pd->states = states;
+       pd->state_count = state_count;
+
+       pr_debug("alloc PM domain %s\n", pd->name);
+       return pd;
+
+free_name:
+       kfree(pd->name);
+free_pd:
+       kfree(pd);
+out:
+       pr_err("failed to alloc PM domain %pOF\n", np);
+       return NULL;
+}
+
+int dt_idle_pd_init_topology(struct device_node *np)
+{
+       struct device_node *node;
+       struct of_phandle_args child, parent;
+       int ret;
+
+       for_each_child_of_node(np, node) {
+               if (of_parse_phandle_with_args(node, "power-domains",
+                                       "#power-domain-cells", 0, &parent))
+                       continue;
+
+               child.np = node;
+               child.args_count = 0;
+               ret = of_genpd_add_subdomain(&parent, &child);
+               of_node_put(parent.np);
+               if (ret) {
+                       of_node_put(node);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+       struct device *dev;
+
+       dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
+       if (IS_ERR_OR_NULL(dev))
+               return dev;
+
+       pm_runtime_irq_safe(dev);
+       if (cpu_online(cpu))
+               pm_runtime_get_sync(dev);
+
+       dev_pm_syscore_device(dev, true);
+
+       return dev;
+}
+
+void dt_idle_detach_cpu(struct device *dev)
+{
+       if (IS_ERR_OR_NULL(dev))
+               return;
+
+       dev_pm_domain_detach(dev, false);
+}
diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
new file mode 100644 (file)
index 0000000..a95483d
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_IDLE_GENPD
+#define __DT_IDLE_GENPD
+
+struct device_node;
+struct generic_pm_domain;
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+void dt_idle_pd_free(struct generic_pm_domain *pd);
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *));
+
+int dt_idle_pd_init_topology(struct device_node *np);
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name);
+
+void dt_idle_detach_cpu(struct device *dev);
+
+#else
+
+static inline void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+}
+
+static inline struct generic_pm_domain *dt_idle_pd_alloc(
+                       struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *))
+{
+       return NULL;
+}
+
+static inline int dt_idle_pd_init_topology(struct device_node *np)
+{
+       return 0;
+}
+
+static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+       return NULL;
+}
+
+static inline void dt_idle_detach_cpu(struct device *dev)
+{
+}
+
+#endif
+
+#endif
index be1bf39..90a920e 100644 (file)
@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
        struct stm32_crc *crc = platform_get_drvdata(pdev);
        int ret = pm_runtime_get_sync(crc->dev);
 
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(crc->dev);
                return ret;
+       }
 
        spin_lock(&crc_list.lock);
        list_del(&crc->list);
index b894e3a..5f8915f 100644 (file)
@@ -3,8 +3,11 @@ config CRYPTO_DEV_VIRTIO
        tristate "VirtIO crypto driver"
        depends on VIRTIO
        select CRYPTO_AEAD
+       select CRYPTO_AKCIPHER2
        select CRYPTO_SKCIPHER
        select CRYPTO_ENGINE
+       select CRYPTO_RSA
+       select MPILIB
        help
          This driver provides support for virtio crypto device. If you
          choose 'M' here, this module will be called virtio_crypto.
index cbfcccc..bfa6cba 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
 virtio_crypto-objs := \
-       virtio_crypto_algs.o \
+       virtio_crypto_skcipher_algs.o \
+       virtio_crypto_akcipher_algs.o \
        virtio_crypto_mgr.o \
        virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
new file mode 100644 (file)
index 0000000..f3ec942
--- /dev/null
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /* Asymmetric algorithms supported by virtio crypto device
+  *
+  * Authors: zhenwei pi <pizhenwei@bytedance.com>
+  *          lei he <helei.sig11@bytedance.com>
+  *
+  * Copyright 2022 Bytedance CO., LTD.
+  */
+
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+struct virtio_crypto_rsa_ctx {
+       MPI n;
+};
+
+struct virtio_crypto_akcipher_ctx {
+       struct crypto_engine_ctx enginectx;
+       struct virtio_crypto *vcrypto;
+       struct crypto_akcipher *tfm;
+       bool session_valid;
+       __u64 session_id;
+       union {
+               struct virtio_crypto_rsa_ctx rsa_ctx;
+       };
+};
+
+struct virtio_crypto_akcipher_request {
+       struct virtio_crypto_request base;
+       struct virtio_crypto_akcipher_ctx *akcipher_ctx;
+       struct akcipher_request *akcipher_req;
+       void *src_buf;
+       void *dst_buf;
+       uint32_t opcode;
+};
+
+struct virtio_crypto_akcipher_algo {
+       uint32_t algonum;
+       uint32_t service;
+       unsigned int active_devs;
+       struct akcipher_alg algo;
+};
+
+static DEFINE_MUTEX(algs_lock);
+
+static void virtio_crypto_akcipher_finalize_req(
+       struct virtio_crypto_akcipher_request *vc_akcipher_req,
+       struct akcipher_request *req, int err)
+{
+       virtcrypto_clear_request(&vc_akcipher_req->base);
+
+       crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
+}
+
+static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
+{
+       struct virtio_crypto_akcipher_request *vc_akcipher_req =
+               container_of(vc_req, struct virtio_crypto_akcipher_request, base);
+       struct akcipher_request *akcipher_req;
+       int error;
+
+       switch (vc_req->status) {
+       case VIRTIO_CRYPTO_OK:
+               error = 0;
+               break;
+       case VIRTIO_CRYPTO_INVSESS:
+       case VIRTIO_CRYPTO_ERR:
+               error = -EINVAL;
+               break;
+       case VIRTIO_CRYPTO_BADMSG:
+               error = -EBADMSG;
+               break;
+
+       case VIRTIO_CRYPTO_KEY_REJECTED:
+               error = -EKEYREJECTED;
+               break;
+
+       default:
+               error = -EIO;
+               break;
+       }
+
+       akcipher_req = vc_akcipher_req->akcipher_req;
+       if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
+               sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
+                                   vc_akcipher_req->dst_buf, akcipher_req->dst_len);
+       virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
+}
+
+static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+               struct virtio_crypto_ctrl_header *header, void *para,
+               const uint8_t *key, unsigned int keylen)
+{
+       struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       uint8_t *pkey;
+       unsigned int inlen;
+       int err;
+       unsigned int num_out = 0, num_in = 0;
+
+       pkey = kmemdup(key, keylen, GFP_ATOMIC);
+       if (!pkey)
+               return -ENOMEM;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       memcpy(&vcrypto->ctrl.header, header, sizeof(vcrypto->ctrl.header));
+       memcpy(&vcrypto->ctrl.u, para, sizeof(vcrypto->ctrl.u));
+       vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr_sg;
+
+       sg_init_one(&key_sg, pkey, keylen);
+       sgs[num_out++] = &key_sg;
+
+       sg_init_one(&inhdr_sg, &vcrypto->input, sizeof(vcrypto->input));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0)
+               goto out;
+
+       virtqueue_kick(vcrypto->ctrl_vq);
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       ctx->session_id = le64_to_cpu(vcrypto->input.session_id);
+       ctx->session_valid = true;
+       err = 0;
+
+out:
+       spin_unlock(&vcrypto->ctrl_lock);
+       kfree_sensitive(pkey);
+
+       if (err < 0)
+               pr_err("virtio_crypto: Create session failed status: %u\n",
+                       le32_to_cpu(vcrypto->input.status));
+
+       return err;
+}
+
+static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
+{
+       struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
+       struct virtio_crypto_destroy_session_req *destroy_session;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       unsigned int num_out = 0, num_in = 0, inlen;
+       int err;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       if (!ctx->session_valid) {
+               err = 0;
+               goto out;
+       }
+       vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+       vcrypto->ctrl.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
+       vcrypto->ctrl.header.queue_id = 0;
+
+       destroy_session = &vcrypto->ctrl.u.destroy_session;
+       destroy_session->session_id = cpu_to_le64(ctx->session_id);
+
+       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr_sg;
+
+       sg_init_one(&inhdr_sg, &vcrypto->ctrl_status.status, sizeof(vcrypto->ctrl_status.status));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0)
+               goto out;
+
+       virtqueue_kick(vcrypto->ctrl_vq);
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = 0;
+       ctx->session_valid = false;
+
+out:
+       spin_unlock(&vcrypto->ctrl_lock);
+       if (err < 0) {
+               pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+                       vcrypto->ctrl_status.status, destroy_session->session_id);
+       }
+
+       return err;
+}
+
+static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
+               struct akcipher_request *req, struct data_queue *data_vq)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
+       struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
+       void *src_buf = NULL, *dst_buf = NULL;
+       unsigned int num_out = 0, num_in = 0;
+       int node = dev_to_node(&vcrypto->vdev->dev);
+       unsigned long flags;
+       int ret = -ENOMEM;
+       bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
+       unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
+
+       /* out header */
+       sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
+       sgs[num_out++] = &outhdr_sg;
+
+       /* src data */
+       src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
+       if (!src_buf)
+               goto err;
+
+       if (verify) {
+               /* for verify operation, both src and dst data work as OUT direction */
+               sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+               sg_init_one(&srcdata_sg, src_buf, src_len);
+               sgs[num_out++] = &srcdata_sg;
+       } else {
+               sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+               sg_init_one(&srcdata_sg, src_buf, src_len);
+               sgs[num_out++] = &srcdata_sg;
+
+               /* dst data */
+               dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
+               if (!dst_buf)
+                       goto err;
+
+               sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
+               sgs[num_out + num_in++] = &dstdata_sg;
+       }
+
+       vc_akcipher_req->src_buf = src_buf;
+       vc_akcipher_req->dst_buf = dst_buf;
+
+       /* in header */
+       sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       spin_lock_irqsave(&data_vq->lock, flags);
+       ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
+       virtqueue_kick(data_vq->vq);
+       spin_unlock_irqrestore(&data_vq->lock, flags);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       kfree(src_buf);
+       kfree(dst_buf);
+
+       return -ENOMEM;
+}
+
+static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
+{
+       struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
+       struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       struct data_queue *data_vq = vc_req->dataq;
+       struct virtio_crypto_op_header *header;
+       struct virtio_crypto_akcipher_data_req *akcipher_req;
+       int ret;
+
+       vc_req->sgs = NULL;
+       vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
+               GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
+       if (!vc_req->req_data)
+               return -ENOMEM;
+
+       /* build request header */
+       header = &vc_req->req_data->header;
+       header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
+       header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       header->session_id = cpu_to_le64(ctx->session_id);
+
+       /* build request akcipher data */
+       akcipher_req = &vc_req->req_data->u.akcipher_req;
+       akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
+       akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);
+
+       ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
+       if (ret < 0) {
+               kfree_sensitive(vc_req->req_data);
+               vc_req->req_data = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
+{
+       struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
+       struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       /* Use the first data virtqueue as default */
+       struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+       vc_req->dataq = data_vq;
+       vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
+       vc_akcipher_req->akcipher_ctx = ctx;
+       vc_akcipher_req->akcipher_req = req;
+       vc_akcipher_req->opcode = opcode;
+
+       return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
+}
+
+static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
+}
+
+static int virtio_crypto_rsa_sign(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
+}
+
+static int virtio_crypto_rsa_verify(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
+}
+
+static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
+                                    const void *key,
+                                    unsigned int keylen,
+                                    bool private,
+                                    int padding_algo,
+                                    int hash_algo)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+       struct virtio_crypto *vcrypto;
+       struct virtio_crypto_ctrl_header header;
+       struct virtio_crypto_akcipher_session_para para;
+       struct rsa_key rsa_key = {0};
+       int node = virtio_crypto_get_current_node();
+       uint32_t keytype;
+       int ret;
+
+       /* mpi_free will test n, just free it. */
+       mpi_free(rsa_ctx->n);
+       rsa_ctx->n = NULL;
+
+       if (private) {
+               keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+               ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+       } else {
+               keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+               ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+       }
+
+       if (ret)
+               return ret;
+
+       rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
+       if (!rsa_ctx->n)
+               return -ENOMEM;
+
+       if (!ctx->vcrypto) {
+               vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+                                               VIRTIO_CRYPTO_AKCIPHER_RSA);
+               if (!vcrypto) {
+                       pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
+                       return -ENODEV;
+               }
+
+               ctx->vcrypto = vcrypto;
+       } else {
+               virtio_crypto_alg_akcipher_close_session(ctx);
+       }
+
+       /* set ctrl header */
+       header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
+       header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       header.queue_id = 0;
+
+       /* set RSA para */
+       para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       para.keytype = cpu_to_le32(keytype);
+       para.keylen = cpu_to_le32(keylen);
+       para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
+       para.u.rsa.hash_algo = cpu_to_le32(hash_algo);
+
+       return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
+}
+
+static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
+                                             const void *key,
+                                             unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+                                        VIRTIO_CRYPTO_RSA_RAW_PADDING,
+                                        VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+
+static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
+                                                    const void *key,
+                                                    unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+                                        VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+                                        VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
+                                            const void *key,
+                                            unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+                                        VIRTIO_CRYPTO_RSA_RAW_PADDING,
+                                        VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
+                                                   const void *key,
+                                                   unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+                                        VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+                                        VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+       return mpi_get_size(rsa_ctx->n);
+}
+
+static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       ctx->tfm = tfm;
+       ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
+       ctx->enginectx.op.prepare_request = NULL;
+       ctx->enginectx.op.unprepare_request = NULL;
+
+       return 0;
+}
+
+static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+       virtio_crypto_alg_akcipher_close_session(ctx);
+       virtcrypto_dev_put(ctx->vcrypto);
+       mpi_free(rsa_ctx->n);
+       rsa_ctx->n = NULL;
+}
+
+static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
+       {
+               .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+               .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+               .algo = {
+                       .encrypt = virtio_crypto_rsa_encrypt,
+                       .decrypt = virtio_crypto_rsa_decrypt,
+                       .set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
+                       .set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
+                       .max_size = virtio_crypto_rsa_max_size,
+                       .init = virtio_crypto_rsa_init_tfm,
+                       .exit = virtio_crypto_rsa_exit_tfm,
+                       .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+                       .base = {
+                               .cra_name = "rsa",
+                               .cra_driver_name = "virtio-crypto-rsa",
+                               .cra_priority = 150,
+                               .cra_module = THIS_MODULE,
+                               .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+                       },
+               },
+       },
+       {
+               .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+               .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+               .algo = {
+                       .encrypt = virtio_crypto_rsa_encrypt,
+                       .decrypt = virtio_crypto_rsa_decrypt,
+                       .sign = virtio_crypto_rsa_sign,
+                       .verify = virtio_crypto_rsa_verify,
+                       .set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
+                       .set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
+                       .max_size = virtio_crypto_rsa_max_size,
+                       .init = virtio_crypto_rsa_init_tfm,
+                       .exit = virtio_crypto_rsa_exit_tfm,
+                       .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+                       .base = {
+                               .cra_name = "pkcs1pad(rsa,sha1)",
+                               .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
+                               .cra_priority = 150,
+                               .cra_module = THIS_MODULE,
+                               .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+                       },
+               },
+       },
+};
+
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
+{
+       int ret = 0;
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+               uint32_t service = virtio_crypto_akcipher_algs[i].service;
+               uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+               if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
+                       ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+                       if (ret)
+                               goto unlock;
+               }
+
+               virtio_crypto_akcipher_algs[i].active_devs++;
+               dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
+                        virtio_crypto_akcipher_algs[i].algo.base.cra_name);
+       }
+
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
+{
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+               uint32_t service = virtio_crypto_akcipher_algs[i].service;
+               uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
+                   !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 1)
+                       crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+
+               virtio_crypto_akcipher_algs[i].active_devs--;
+       }
+
+       mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
deleted file mode 100644 (file)
index 583c0b5..0000000
+++ /dev/null
@@ -1,669 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
- /* Algorithms supported by virtio crypto device
-  *
-  * Authors: Gonglei <arei.gonglei@huawei.com>
-  *
-  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
-  */
-
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <crypto/scatterwalk.h>
-#include <linux/atomic.h>
-
-#include <uapi/linux/virtio_crypto.h>
-#include "virtio_crypto_common.h"
-
-
-struct virtio_crypto_skcipher_ctx {
-       struct crypto_engine_ctx enginectx;
-       struct virtio_crypto *vcrypto;
-       struct crypto_skcipher *tfm;
-
-       struct virtio_crypto_sym_session_info enc_sess_info;
-       struct virtio_crypto_sym_session_info dec_sess_info;
-};
-
-struct virtio_crypto_sym_request {
-       struct virtio_crypto_request base;
-
-       /* Cipher or aead */
-       uint32_t type;
-       struct virtio_crypto_skcipher_ctx *skcipher_ctx;
-       struct skcipher_request *skcipher_req;
-       uint8_t *iv;
-       /* Encryption? */
-       bool encrypt;
-};
-
-struct virtio_crypto_algo {
-       uint32_t algonum;
-       uint32_t service;
-       unsigned int active_devs;
-       struct skcipher_alg algo;
-};
-
-/*
- * The algs_lock protects the below global virtio_crypto_active_devs
- * and crypto algorithms registion.
- */
-static DEFINE_MUTEX(algs_lock);
-static void virtio_crypto_skcipher_finalize_req(
-       struct virtio_crypto_sym_request *vc_sym_req,
-       struct skcipher_request *req,
-       int err);
-
-static void virtio_crypto_dataq_sym_callback
-               (struct virtio_crypto_request *vc_req, int len)
-{
-       struct virtio_crypto_sym_request *vc_sym_req =
-               container_of(vc_req, struct virtio_crypto_sym_request, base);
-       struct skcipher_request *ablk_req;
-       int error;
-
-       /* Finish the encrypt or decrypt process */
-       if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
-               switch (vc_req->status) {
-               case VIRTIO_CRYPTO_OK:
-                       error = 0;
-                       break;
-               case VIRTIO_CRYPTO_INVSESS:
-               case VIRTIO_CRYPTO_ERR:
-                       error = -EINVAL;
-                       break;
-               case VIRTIO_CRYPTO_BADMSG:
-                       error = -EBADMSG;
-                       break;
-               default:
-                       error = -EIO;
-                       break;
-               }
-               ablk_req = vc_sym_req->skcipher_req;
-               virtio_crypto_skcipher_finalize_req(vc_sym_req,
-                                                       ablk_req, error);
-       }
-}
-
-static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
-{
-       u64 total = 0;
-
-       for (total = 0; sg; sg = sg_next(sg))
-               total += sg->length;
-
-       return total;
-}
-
-static int
-virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
-{
-       switch (key_len) {
-       case AES_KEYSIZE_128:
-       case AES_KEYSIZE_192:
-       case AES_KEYSIZE_256:
-               *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int virtio_crypto_alg_skcipher_init_session(
-               struct virtio_crypto_skcipher_ctx *ctx,
-               uint32_t alg, const uint8_t *key,
-               unsigned int keylen,
-               int encrypt)
-{
-       struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
-       unsigned int tmp;
-       struct virtio_crypto *vcrypto = ctx->vcrypto;
-       int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
-       int err;
-       unsigned int num_out = 0, num_in = 0;
-
-       /*
-        * Avoid to do DMA from the stack, switch to using
-        * dynamically-allocated for the key
-        */
-       uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);
-
-       if (!cipher_key)
-               return -ENOMEM;
-
-       spin_lock(&vcrypto->ctrl_lock);
-       /* Pad ctrl header */
-       vcrypto->ctrl.header.opcode =
-               cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
-       vcrypto->ctrl.header.algo = cpu_to_le32(alg);
-       /* Set the default dataqueue id to 0 */
-       vcrypto->ctrl.header.queue_id = 0;
-
-       vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
-       /* Pad cipher's parameters */
-       vcrypto->ctrl.u.sym_create_session.op_type =
-               cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
-       vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
-               vcrypto->ctrl.header.algo;
-       vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
-               cpu_to_le32(keylen);
-       vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
-               cpu_to_le32(op);
-
-       sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
-       sgs[num_out++] = &outhdr;
-
-       /* Set key */
-       sg_init_one(&key_sg, cipher_key, keylen);
-       sgs[num_out++] = &key_sg;
-
-       /* Return status and session id back */
-       sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
-       sgs[num_out + num_in++] = &inhdr;
-
-       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
-                               num_in, vcrypto, GFP_ATOMIC);
-       if (err < 0) {
-               spin_unlock(&vcrypto->ctrl_lock);
-               kfree_sensitive(cipher_key);
-               return err;
-       }
-       virtqueue_kick(vcrypto->ctrl_vq);
-
-       /*
-        * Trapping into the hypervisor, so the request should be
-        * handled immediately.
-        */
-       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
-              !virtqueue_is_broken(vcrypto->ctrl_vq))
-               cpu_relax();
-
-       if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
-               spin_unlock(&vcrypto->ctrl_lock);
-               pr_err("virtio_crypto: Create session failed status: %u\n",
-                       le32_to_cpu(vcrypto->input.status));
-               kfree_sensitive(cipher_key);
-               return -EINVAL;
-       }
-
-       if (encrypt)
-               ctx->enc_sess_info.session_id =
-                       le64_to_cpu(vcrypto->input.session_id);
-       else
-               ctx->dec_sess_info.session_id =
-                       le64_to_cpu(vcrypto->input.session_id);
-
-       spin_unlock(&vcrypto->ctrl_lock);
-
-       kfree_sensitive(cipher_key);
-       return 0;
-}
-
-static int virtio_crypto_alg_skcipher_close_session(
-               struct virtio_crypto_skcipher_ctx *ctx,
-               int encrypt)
-{
-       struct scatterlist outhdr, status_sg, *sgs[2];
-       unsigned int tmp;
-       struct virtio_crypto_destroy_session_req *destroy_session;
-       struct virtio_crypto *vcrypto = ctx->vcrypto;
-       int err;
-       unsigned int num_out = 0, num_in = 0;
-
-       spin_lock(&vcrypto->ctrl_lock);
-       vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
-       /* Pad ctrl header */
-       vcrypto->ctrl.header.opcode =
-               cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
-       /* Set the default virtqueue id to 0 */
-       vcrypto->ctrl.header.queue_id = 0;
-
-       destroy_session = &vcrypto->ctrl.u.destroy_session;
-
-       if (encrypt)
-               destroy_session->session_id =
-                       cpu_to_le64(ctx->enc_sess_info.session_id);
-       else
-               destroy_session->session_id =
-                       cpu_to_le64(ctx->dec_sess_info.session_id);
-
-       sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
-       sgs[num_out++] = &outhdr;
-
-       /* Return status and session id back */
-       sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
-               sizeof(vcrypto->ctrl_status.status));
-       sgs[num_out + num_in++] = &status_sg;
-
-       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
-                       num_in, vcrypto, GFP_ATOMIC);
-       if (err < 0) {
-               spin_unlock(&vcrypto->ctrl_lock);
-               return err;
-       }
-       virtqueue_kick(vcrypto->ctrl_vq);
-
-       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
-              !virtqueue_is_broken(vcrypto->ctrl_vq))
-               cpu_relax();
-
-       if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
-               spin_unlock(&vcrypto->ctrl_lock);
-               pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
-                       vcrypto->ctrl_status.status,
-                       destroy_session->session_id);
-
-               return -EINVAL;
-       }
-       spin_unlock(&vcrypto->ctrl_lock);
-
-       return 0;
-}
-
-static int virtio_crypto_alg_skcipher_init_sessions(
-               struct virtio_crypto_skcipher_ctx *ctx,
-               const uint8_t *key, unsigned int keylen)
-{
-       uint32_t alg;
-       int ret;
-       struct virtio_crypto *vcrypto = ctx->vcrypto;
-
-       if (keylen > vcrypto->max_cipher_key_len) {
-               pr_err("virtio_crypto: the key is too long\n");
-               return -EINVAL;
-       }
-
-       if (virtio_crypto_alg_validate_key(keylen, &alg))
-               return -EINVAL;
-
-       /* Create encryption session */
-       ret = virtio_crypto_alg_skcipher_init_session(ctx,
-                       alg, key, keylen, 1);
-       if (ret)
-               return ret;
-       /* Create decryption session */
-       ret = virtio_crypto_alg_skcipher_init_session(ctx,
-                       alg, key, keylen, 0);
-       if (ret) {
-               virtio_crypto_alg_skcipher_close_session(ctx, 1);
-               return ret;
-       }
-       return 0;
-}
-
-/* Note: kernel crypto API realization */
-static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
-                                        const uint8_t *key,
-                                        unsigned int keylen)
-{
-       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       uint32_t alg;
-       int ret;
-
-       ret = virtio_crypto_alg_validate_key(keylen, &alg);
-       if (ret)
-               return ret;
-
-       if (!ctx->vcrypto) {
-               /* New key */
-               int node = virtio_crypto_get_current_node();
-               struct virtio_crypto *vcrypto =
-                                     virtcrypto_get_dev_node(node,
-                                     VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
-               if (!vcrypto) {
-                       pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
-                       return -ENODEV;
-               }
-
-               ctx->vcrypto = vcrypto;
-       } else {
-               /* Rekeying, we should close the created sessions previously */
-               virtio_crypto_alg_skcipher_close_session(ctx, 1);
-               virtio_crypto_alg_skcipher_close_session(ctx, 0);
-       }
-
-       ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
-       if (ret) {
-               virtcrypto_dev_put(ctx->vcrypto);
-               ctx->vcrypto = NULL;
-
-               return ret;
-       }
-
-       return 0;
-}
-
-static int
-__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
-               struct skcipher_request *req,
-               struct data_queue *data_vq)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
-       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
-       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-       struct virtio_crypto *vcrypto = ctx->vcrypto;
-       struct virtio_crypto_op_data_req *req_data;
-       int src_nents, dst_nents;
-       int err;
-       unsigned long flags;
-       struct scatterlist outhdr, iv_sg, status_sg, **sgs;
-       u64 dst_len;
-       unsigned int num_out = 0, num_in = 0;
-       int sg_total;
-       uint8_t *iv;
-       struct scatterlist *sg;
-
-       src_nents = sg_nents_for_len(req->src, req->cryptlen);
-       if (src_nents < 0) {
-               pr_err("Invalid number of src SG.\n");
-               return src_nents;
-       }
-
-       dst_nents = sg_nents(req->dst);
-
-       pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
-                       src_nents, dst_nents);
-
-       /* Why 3?  outhdr + iv + inhdr */
-       sg_total = src_nents + dst_nents + 3;
-       sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
-                               dev_to_node(&vcrypto->vdev->dev));
-       if (!sgs)
-               return -ENOMEM;
-
-       req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
-                               dev_to_node(&vcrypto->vdev->dev));
-       if (!req_data) {
-               kfree(sgs);
-               return -ENOMEM;
-       }
-
-       vc_req->req_data = req_data;
-       vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
-       /* Head of operation */
-       if (vc_sym_req->encrypt) {
-               req_data->header.session_id =
-                       cpu_to_le64(ctx->enc_sess_info.session_id);
-               req_data->header.opcode =
-                       cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
-       } else {
-               req_data->header.session_id =
-                       cpu_to_le64(ctx->dec_sess_info.session_id);
-               req_data->header.opcode =
-                       cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
-       }
-       req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
-       req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
-       req_data->u.sym_req.u.cipher.para.src_data_len =
-                       cpu_to_le32(req->cryptlen);
-
-       dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
-       if (unlikely(dst_len > U32_MAX)) {
-               pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
-               err = -EINVAL;
-               goto free;
-       }
-
-       dst_len = min_t(unsigned int, req->cryptlen, dst_len);
-       pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
-                       req->cryptlen, dst_len);
-
-       if (unlikely(req->cryptlen + dst_len + ivsize +
-               sizeof(vc_req->status) > vcrypto->max_size)) {
-               pr_err("virtio_crypto: The length is too big\n");
-               err = -EINVAL;
-               goto free;
-       }
-
-       req_data->u.sym_req.u.cipher.para.dst_data_len =
-                       cpu_to_le32((uint32_t)dst_len);
-
-       /* Outhdr */
-       sg_init_one(&outhdr, req_data, sizeof(*req_data));
-       sgs[num_out++] = &outhdr;
-
-       /* IV */
-
-       /*
-        * Avoid to do DMA from the stack, switch to using
-        * dynamically-allocated for the IV
-        */
-       iv = kzalloc_node(ivsize, GFP_ATOMIC,
-                               dev_to_node(&vcrypto->vdev->dev));
-       if (!iv) {
-               err = -ENOMEM;
-               goto free;
-       }
-       memcpy(iv, req->iv, ivsize);
-       if (!vc_sym_req->encrypt)
-               scatterwalk_map_and_copy(req->iv, req->src,
-                                        req->cryptlen - AES_BLOCK_SIZE,
-                                        AES_BLOCK_SIZE, 0);
-
-       sg_init_one(&iv_sg, iv, ivsize);
-       sgs[num_out++] = &iv_sg;
-       vc_sym_req->iv = iv;
-
-       /* Source data */
-       for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
-               sgs[num_out++] = sg;
-
-       /* Destination data */
-       for (sg = req->dst; sg; sg = sg_next(sg))
-               sgs[num_out + num_in++] = sg;
-
-       /* Status */
-       sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
-       sgs[num_out + num_in++] = &status_sg;
-
-       vc_req->sgs = sgs;
-
-       spin_lock_irqsave(&data_vq->lock, flags);
-       err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
-                               num_in, vc_req, GFP_ATOMIC);
-       virtqueue_kick(data_vq->vq);
-       spin_unlock_irqrestore(&data_vq->lock, flags);
-       if (unlikely(err < 0))
-               goto free_iv;
-
-       return 0;
-
-free_iv:
-       kfree_sensitive(iv);
-free:
-       kfree_sensitive(req_data);
-       kfree(sgs);
-       return err;
-}
-
-static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
-       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
-       struct virtio_crypto_sym_request *vc_sym_req =
-                               skcipher_request_ctx(req);
-       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
-       struct virtio_crypto *vcrypto = ctx->vcrypto;
-       /* Use the first data virtqueue as default */
-       struct data_queue *data_vq = &vcrypto->data_vq[0];
-
-       if (!req->cryptlen)
-               return 0;
-       if (req->cryptlen % AES_BLOCK_SIZE)
-               return -EINVAL;
-
-       vc_req->dataq = data_vq;
-       vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
-       vc_sym_req->skcipher_ctx = ctx;
-       vc_sym_req->skcipher_req = req;
-       vc_sym_req->encrypt = true;
-
-       return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
-}
-
-static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
-       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
-       struct virtio_crypto_sym_request *vc_sym_req =
-                               skcipher_request_ctx(req);
-       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
-       struct virtio_crypto *vcrypto = ctx->vcrypto;
-       /* Use the first data virtqueue as default */
-       struct data_queue *data_vq = &vcrypto->data_vq[0];
-
-       if (!req->cryptlen)
-               return 0;
-       if (req->cryptlen % AES_BLOCK_SIZE)
-               return -EINVAL;
-
-       vc_req->dataq = data_vq;
-       vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
-       vc_sym_req->skcipher_ctx = ctx;
-       vc_sym_req->skcipher_req = req;
-       vc_sym_req->encrypt = false;
-
-       return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
-}
-
-static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
-{
-       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
-       ctx->tfm = tfm;
-
-       ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
-       ctx->enginectx.op.prepare_request = NULL;
-       ctx->enginectx.op.unprepare_request = NULL;
-       return 0;
-}
-
-static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
-{
-       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       if (!ctx->vcrypto)
-               return;
-
-       virtio_crypto_alg_skcipher_close_session(ctx, 1);
-       virtio_crypto_alg_skcipher_close_session(ctx, 0);
-       virtcrypto_dev_put(ctx->vcrypto);
-       ctx->vcrypto = NULL;
-}
-
-int virtio_crypto_skcipher_crypt_req(
-       struct crypto_engine *engine, void *vreq)
-{
-       struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
-       struct virtio_crypto_sym_request *vc_sym_req =
-                               skcipher_request_ctx(req);
-       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
-       struct data_queue *data_vq = vc_req->dataq;
-       int ret;
-
-       ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
-       if (ret < 0)
-               return ret;
-
-       virtqueue_kick(data_vq->vq);
-
-       return 0;
-}
-
-static void virtio_crypto_skcipher_finalize_req(
-       struct virtio_crypto_sym_request *vc_sym_req,
-       struct skcipher_request *req,
-       int err)
-{
-       if (vc_sym_req->encrypt)
-               scatterwalk_map_and_copy(req->iv, req->dst,
-                                        req->cryptlen - AES_BLOCK_SIZE,
-                                        AES_BLOCK_SIZE, 0);
-       kfree_sensitive(vc_sym_req->iv);
-       virtcrypto_clear_request(&vc_sym_req->base);
-
-       crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
-                                          req, err);
-}
-
-static struct virtio_crypto_algo virtio_crypto_algs[] = { {
-       .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
-       .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
-       .algo = {
-               .base.cra_name          = "cbc(aes)",
-               .base.cra_driver_name   = "virtio_crypto_aes_cbc",
-               .base.cra_priority      = 150,
-               .base.cra_flags         = CRYPTO_ALG_ASYNC |
-                                         CRYPTO_ALG_ALLOCATES_MEMORY,
-               .base.cra_blocksize     = AES_BLOCK_SIZE,
-               .base.cra_ctxsize       = sizeof(struct virtio_crypto_skcipher_ctx),
-               .base.cra_module        = THIS_MODULE,
-               .init                   = virtio_crypto_skcipher_init,
-               .exit                   = virtio_crypto_skcipher_exit,
-               .setkey                 = virtio_crypto_skcipher_setkey,
-               .decrypt                = virtio_crypto_skcipher_decrypt,
-               .encrypt                = virtio_crypto_skcipher_encrypt,
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-               .ivsize                 = AES_BLOCK_SIZE,
-       },
-} };
-
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
-{
-       int ret = 0;
-       int i = 0;
-
-       mutex_lock(&algs_lock);
-
-       for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
-
-               uint32_t service = virtio_crypto_algs[i].service;
-               uint32_t algonum = virtio_crypto_algs[i].algonum;
-
-               if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
-                       continue;
-
-               if (virtio_crypto_algs[i].active_devs == 0) {
-                       ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
-                       if (ret)
-                               goto unlock;
-               }
-
-               virtio_crypto_algs[i].active_devs++;
-               dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
-                        virtio_crypto_algs[i].algo.base.cra_name);
-       }
-
-unlock:
-       mutex_unlock(&algs_lock);
-       return ret;
-}
-
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
-{
-       int i = 0;
-
-       mutex_lock(&algs_lock);
-
-       for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
-
-               uint32_t service = virtio_crypto_algs[i].service;
-               uint32_t algonum = virtio_crypto_algs[i].algonum;
-
-               if (virtio_crypto_algs[i].active_devs == 0 ||
-                   !virtcrypto_algo_is_supported(vcrypto, service, algonum))
-                       continue;
-
-               if (virtio_crypto_algs[i].active_devs == 1)
-                       crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
-
-               virtio_crypto_algs[i].active_devs--;
-       }
-
-       mutex_unlock(&algs_lock);
-}
index a24f85c..e693d4e 100644 (file)
@@ -56,6 +56,7 @@ struct virtio_crypto {
        u32 mac_algo_l;
        u32 mac_algo_h;
        u32 aead_algo;
+       u32 akcipher_algo;
 
        /* Maximum length of cipher key */
        u32 max_cipher_key_len;
@@ -129,7 +130,9 @@ static inline int virtio_crypto_get_current_node(void)
        return node;
 }
 
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
 
 #endif /* _VIRTIO_CRYPTO_COMMON_H */
index 8e977b7..c6f482d 100644 (file)
@@ -297,6 +297,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        u32 mac_algo_l = 0;
        u32 mac_algo_h = 0;
        u32 aead_algo = 0;
+       u32 akcipher_algo = 0;
        u32 crypto_services = 0;
 
        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -348,6 +349,9 @@ static int virtcrypto_probe(struct virtio_device *vdev)
                        mac_algo_h, &mac_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        aead_algo, &aead_algo);
+       if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
+               virtio_cread_le(vdev, struct virtio_crypto_config,
+                               akcipher_algo, &akcipher_algo);
 
        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -374,7 +378,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        vcrypto->mac_algo_h = mac_algo_h;
        vcrypto->hash_algo = hash_algo;
        vcrypto->aead_algo = aead_algo;
-
+       vcrypto->akcipher_algo = akcipher_algo;
 
        dev_info(&vdev->dev,
                "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
index 6860f81..70e778a 100644 (file)
@@ -237,8 +237,14 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
  */
 int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
 {
-       if (virtio_crypto_algs_register(vcrypto)) {
-               pr_err("virtio_crypto: Failed to register crypto algs\n");
+       if (virtio_crypto_skcipher_algs_register(vcrypto)) {
+               pr_err("virtio_crypto: Failed to register crypto skcipher algs\n");
+               return -EFAULT;
+       }
+
+       if (virtio_crypto_akcipher_algs_register(vcrypto)) {
+               pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
+               virtio_crypto_skcipher_algs_unregister(vcrypto);
                return -EFAULT;
        }
 
@@ -257,7 +263,8 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
  */
 void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
 {
-       virtio_crypto_algs_unregister(vcrypto);
+       virtio_crypto_skcipher_algs_unregister(vcrypto);
+       virtio_crypto_akcipher_algs_unregister(vcrypto);
 }
 
 /*
@@ -312,6 +319,10 @@ bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
        case VIRTIO_CRYPTO_SERVICE_AEAD:
                algo_mask = vcrypto->aead_algo;
                break;
+
+       case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
+               algo_mask = vcrypto->akcipher_algo;
+               break;
        }
 
        if (!(algo_mask & (1u << algo)))
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
new file mode 100644 (file)
index 0000000..a618c46
--- /dev/null
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /* Algorithms supported by virtio crypto device
+  *
+  * Authors: Gonglei <arei.gonglei@huawei.com>
+  *
+  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+  */
+
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+struct virtio_crypto_skcipher_ctx {
+       struct crypto_engine_ctx enginectx;
+       struct virtio_crypto *vcrypto;
+       struct crypto_skcipher *tfm;
+
+       struct virtio_crypto_sym_session_info enc_sess_info;
+       struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_sym_request {
+       struct virtio_crypto_request base;
+
+       /* Cipher or aead */
+       uint32_t type;
+       struct virtio_crypto_skcipher_ctx *skcipher_ctx;
+       struct skcipher_request *skcipher_req;
+       uint8_t *iv;
+       /* Encryption? */
+       bool encrypt;
+};
+
+struct virtio_crypto_algo {
+       uint32_t algonum;
+       uint32_t service;
+       unsigned int active_devs;
+       struct skcipher_alg algo;
+};
+
+/*
+ * The algs_lock protects the below global virtio_crypto_active_devs
+ * and crypto algorithms registration.
+ */
+static DEFINE_MUTEX(algs_lock);
+static void virtio_crypto_skcipher_finalize_req(
+       struct virtio_crypto_sym_request *vc_sym_req,
+       struct skcipher_request *req,
+       int err);
+
+static void virtio_crypto_dataq_sym_callback
+               (struct virtio_crypto_request *vc_req, int len)
+{
+       struct virtio_crypto_sym_request *vc_sym_req =
+               container_of(vc_req, struct virtio_crypto_sym_request, base);
+       struct skcipher_request *ablk_req;
+       int error;
+
+       /* Finish the encrypt or decrypt process */
+       if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+               switch (vc_req->status) {
+               case VIRTIO_CRYPTO_OK:
+                       error = 0;
+                       break;
+               case VIRTIO_CRYPTO_INVSESS:
+               case VIRTIO_CRYPTO_ERR:
+                       error = -EINVAL;
+                       break;
+               case VIRTIO_CRYPTO_BADMSG:
+                       error = -EBADMSG;
+                       break;
+               default:
+                       error = -EIO;
+                       break;
+               }
+               ablk_req = vc_sym_req->skcipher_req;
+               virtio_crypto_skcipher_finalize_req(vc_sym_req,
+                                                       ablk_req, error);
+       }
+}
+
+static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
+{
+       u64 total = 0;
+
+       for (total = 0; sg; sg = sg_next(sg))
+               total += sg->length;
+
+       return total;
+}
+
+static int
+virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+{
+       switch (key_len) {
+       case AES_KEYSIZE_128:
+       case AES_KEYSIZE_192:
+       case AES_KEYSIZE_256:
+               *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int virtio_crypto_alg_skcipher_init_session(
+               struct virtio_crypto_skcipher_ctx *ctx,
+               uint32_t alg, const uint8_t *key,
+               unsigned int keylen,
+               int encrypt)
+{
+       struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
+       unsigned int tmp;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
+       int err;
+       unsigned int num_out = 0, num_in = 0;
+
+       /*
+        * Avoid DMA from the stack; use a
+        * dynamically-allocated buffer for the key.
+        */
+       uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);
+
+       if (!cipher_key)
+               return -ENOMEM;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       /* Pad ctrl header */
+       vcrypto->ctrl.header.opcode =
+               cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+       vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+       /* Set the default dataqueue id to 0 */
+       vcrypto->ctrl.header.queue_id = 0;
+
+       vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+       /* Pad cipher's parameters */
+       vcrypto->ctrl.u.sym_create_session.op_type =
+               cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+       vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
+               vcrypto->ctrl.header.algo;
+       vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
+               cpu_to_le32(keylen);
+       vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
+               cpu_to_le32(op);
+
+       sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr;
+
+       /* Set key */
+       sg_init_one(&key_sg, cipher_key, keylen);
+       sgs[num_out++] = &key_sg;
+
+       /* Return status and session id back */
+       sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+       sgs[num_out + num_in++] = &inhdr;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+                               num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0) {
+               spin_unlock(&vcrypto->ctrl_lock);
+               kfree_sensitive(cipher_key);
+               return err;
+       }
+       virtqueue_kick(vcrypto->ctrl_vq);
+
+       /*
+        * Trapping into the hypervisor, so the request should be
+        * handled immediately.
+        */
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+               spin_unlock(&vcrypto->ctrl_lock);
+               pr_err("virtio_crypto: Create session failed status: %u\n",
+                       le32_to_cpu(vcrypto->input.status));
+               kfree_sensitive(cipher_key);
+               return -EINVAL;
+       }
+
+       if (encrypt)
+               ctx->enc_sess_info.session_id =
+                       le64_to_cpu(vcrypto->input.session_id);
+       else
+               ctx->dec_sess_info.session_id =
+                       le64_to_cpu(vcrypto->input.session_id);
+
+       spin_unlock(&vcrypto->ctrl_lock);
+
+       kfree_sensitive(cipher_key);
+       return 0;
+}
+
+static int virtio_crypto_alg_skcipher_close_session(
+               struct virtio_crypto_skcipher_ctx *ctx,
+               int encrypt)
+{
+       struct scatterlist outhdr, status_sg, *sgs[2];
+       unsigned int tmp;
+       struct virtio_crypto_destroy_session_req *destroy_session;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       int err;
+       unsigned int num_out = 0, num_in = 0;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+       /* Pad ctrl header */
+       vcrypto->ctrl.header.opcode =
+               cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+       /* Set the default virtqueue id to 0 */
+       vcrypto->ctrl.header.queue_id = 0;
+
+       destroy_session = &vcrypto->ctrl.u.destroy_session;
+
+       if (encrypt)
+               destroy_session->session_id =
+                       cpu_to_le64(ctx->enc_sess_info.session_id);
+       else
+               destroy_session->session_id =
+                       cpu_to_le64(ctx->dec_sess_info.session_id);
+
+       sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr;
+
+       /* Return status and session id back */
+       sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
+               sizeof(vcrypto->ctrl_status.status));
+       sgs[num_out + num_in++] = &status_sg;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+                       num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0) {
+               spin_unlock(&vcrypto->ctrl_lock);
+               return err;
+       }
+       virtqueue_kick(vcrypto->ctrl_vq);
+
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+               spin_unlock(&vcrypto->ctrl_lock);
+               pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+                       vcrypto->ctrl_status.status,
+                       destroy_session->session_id);
+
+               return -EINVAL;
+       }
+       spin_unlock(&vcrypto->ctrl_lock);
+
+       return 0;
+}
+
+static int virtio_crypto_alg_skcipher_init_sessions(
+               struct virtio_crypto_skcipher_ctx *ctx,
+               const uint8_t *key, unsigned int keylen)
+{
+       uint32_t alg;
+       int ret;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+
+       if (keylen > vcrypto->max_cipher_key_len) {
+               pr_err("virtio_crypto: the key is too long\n");
+               return -EINVAL;
+       }
+
+       if (virtio_crypto_alg_validate_key(keylen, &alg))
+               return -EINVAL;
+
+       /* Create encryption session */
+       ret = virtio_crypto_alg_skcipher_init_session(ctx,
+                       alg, key, keylen, 1);
+       if (ret)
+               return ret;
+       /* Create decryption session */
+       ret = virtio_crypto_alg_skcipher_init_session(ctx,
+                       alg, key, keylen, 0);
+       if (ret) {
+               virtio_crypto_alg_skcipher_close_session(ctx, 1);
+               return ret;
+       }
+       return 0;
+}
+
+/* Note: kernel crypto API realization */
+static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+                                        const uint8_t *key,
+                                        unsigned int keylen)
+{
+       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       uint32_t alg;
+       int ret;
+
+       ret = virtio_crypto_alg_validate_key(keylen, &alg);
+       if (ret)
+               return ret;
+
+       if (!ctx->vcrypto) {
+               /* New key */
+               int node = virtio_crypto_get_current_node();
+               struct virtio_crypto *vcrypto =
+                                     virtcrypto_get_dev_node(node,
+                                     VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
+               if (!vcrypto) {
+                       pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
+                       return -ENODEV;
+               }
+
+               ctx->vcrypto = vcrypto;
+       } else {
+               /* Rekeying, we should close the created sessions previously */
+               virtio_crypto_alg_skcipher_close_session(ctx, 1);
+               virtio_crypto_alg_skcipher_close_session(ctx, 0);
+       }
+
+       ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
+       if (ret) {
+               virtcrypto_dev_put(ctx->vcrypto);
+               ctx->vcrypto = NULL;
+
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+               struct skcipher_request *req,
+               struct data_queue *data_vq)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
+       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       struct virtio_crypto_op_data_req *req_data;
+       int src_nents, dst_nents;
+       int err;
+       unsigned long flags;
+       struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+       u64 dst_len;
+       unsigned int num_out = 0, num_in = 0;
+       int sg_total;
+       uint8_t *iv;
+       struct scatterlist *sg;
+
+       src_nents = sg_nents_for_len(req->src, req->cryptlen);
+       if (src_nents < 0) {
+               pr_err("Invalid number of src SG.\n");
+               return src_nents;
+       }
+
+       dst_nents = sg_nents(req->dst);
+
+       pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+                       src_nents, dst_nents);
+
+       /* Why 3?  outhdr + iv + inhdr */
+       sg_total = src_nents + dst_nents + 3;
+       sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
+                               dev_to_node(&vcrypto->vdev->dev));
+       if (!sgs)
+               return -ENOMEM;
+
+       req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
+                               dev_to_node(&vcrypto->vdev->dev));
+       if (!req_data) {
+               kfree(sgs);
+               return -ENOMEM;
+       }
+
+       vc_req->req_data = req_data;
+       vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+       /* Head of operation */
+       if (vc_sym_req->encrypt) {
+               req_data->header.session_id =
+                       cpu_to_le64(ctx->enc_sess_info.session_id);
+               req_data->header.opcode =
+                       cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
+       } else {
+               req_data->header.session_id =
+                       cpu_to_le64(ctx->dec_sess_info.session_id);
+               req_data->header.opcode =
+                       cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
+       }
+       req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+       req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
+       req_data->u.sym_req.u.cipher.para.src_data_len =
+                       cpu_to_le32(req->cryptlen);
+
+       dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
+       if (unlikely(dst_len > U32_MAX)) {
+               pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
+               err = -EINVAL;
+               goto free;
+       }
+
+       dst_len = min_t(unsigned int, req->cryptlen, dst_len);
+       pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+                       req->cryptlen, dst_len);
+
+       if (unlikely(req->cryptlen + dst_len + ivsize +
+               sizeof(vc_req->status) > vcrypto->max_size)) {
+               pr_err("virtio_crypto: The length is too big\n");
+               err = -EINVAL;
+               goto free;
+       }
+
+       req_data->u.sym_req.u.cipher.para.dst_data_len =
+                       cpu_to_le32((uint32_t)dst_len);
+
+       /* Outhdr */
+       sg_init_one(&outhdr, req_data, sizeof(*req_data));
+       sgs[num_out++] = &outhdr;
+
+       /* IV */
+
+       /*
+        * Avoid DMA from the stack; use a
+        * dynamically-allocated buffer for the IV.
+        */
+       iv = kzalloc_node(ivsize, GFP_ATOMIC,
+                               dev_to_node(&vcrypto->vdev->dev));
+       if (!iv) {
+               err = -ENOMEM;
+               goto free;
+       }
+       memcpy(iv, req->iv, ivsize);
+       if (!vc_sym_req->encrypt)
+               scatterwalk_map_and_copy(req->iv, req->src,
+                                        req->cryptlen - AES_BLOCK_SIZE,
+                                        AES_BLOCK_SIZE, 0);
+
+       sg_init_one(&iv_sg, iv, ivsize);
+       sgs[num_out++] = &iv_sg;
+       vc_sym_req->iv = iv;
+
+       /* Source data */
+       for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
+               sgs[num_out++] = sg;
+
+       /* Destination data */
+       for (sg = req->dst; sg; sg = sg_next(sg))
+               sgs[num_out + num_in++] = sg;
+
+       /* Status */
+       sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+       sgs[num_out + num_in++] = &status_sg;
+
+       vc_req->sgs = sgs;
+
+       spin_lock_irqsave(&data_vq->lock, flags);
+       err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
+                               num_in, vc_req, GFP_ATOMIC);
+       virtqueue_kick(data_vq->vq);
+       spin_unlock_irqrestore(&data_vq->lock, flags);
+       if (unlikely(err < 0))
+               goto free_iv;
+
+       return 0;
+
+free_iv:
+       kfree_sensitive(iv);
+free:
+       kfree_sensitive(req_data);
+       kfree(sgs);
+       return err;
+}
+
+static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
+       struct virtio_crypto_sym_request *vc_sym_req =
+                               skcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       /* Use the first data virtqueue as default */
+       struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+       if (!req->cryptlen)
+               return 0;
+       if (req->cryptlen % AES_BLOCK_SIZE)
+               return -EINVAL;
+
+       vc_req->dataq = data_vq;
+       vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+       vc_sym_req->skcipher_ctx = ctx;
+       vc_sym_req->skcipher_req = req;
+       vc_sym_req->encrypt = true;
+
+       return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
+       struct virtio_crypto_sym_request *vc_sym_req =
+                               skcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       /* Use the first data virtqueue as default */
+       struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+       if (!req->cryptlen)
+               return 0;
+       if (req->cryptlen % AES_BLOCK_SIZE)
+               return -EINVAL;
+
+       vc_req->dataq = data_vq;
+       vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+       vc_sym_req->skcipher_ctx = ctx;
+       vc_sym_req->skcipher_req = req;
+       vc_sym_req->encrypt = false;
+
+       return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
+{
+       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
+       ctx->tfm = tfm;
+
+       ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
+       ctx->enginectx.op.prepare_request = NULL;
+       ctx->enginectx.op.unprepare_request = NULL;
+       return 0;
+}
+
+static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
+{
+       struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       if (!ctx->vcrypto)
+               return;
+
+       virtio_crypto_alg_skcipher_close_session(ctx, 1);
+       virtio_crypto_alg_skcipher_close_session(ctx, 0);
+       virtcrypto_dev_put(ctx->vcrypto);
+       ctx->vcrypto = NULL;
+}
+
+int virtio_crypto_skcipher_crypt_req(
+       struct crypto_engine *engine, void *vreq)
+{
+       struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
+       struct virtio_crypto_sym_request *vc_sym_req =
+                               skcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+       struct data_queue *data_vq = vc_req->dataq;
+       int ret;
+
+       ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
+       if (ret < 0)
+               return ret;
+
+       virtqueue_kick(data_vq->vq);
+
+       return 0;
+}
+
+static void virtio_crypto_skcipher_finalize_req(
+       struct virtio_crypto_sym_request *vc_sym_req,
+       struct skcipher_request *req,
+       int err)
+{
+       if (vc_sym_req->encrypt)
+               scatterwalk_map_and_copy(req->iv, req->dst,
+                                        req->cryptlen - AES_BLOCK_SIZE,
+                                        AES_BLOCK_SIZE, 0);
+       kfree_sensitive(vc_sym_req->iv);
+       virtcrypto_clear_request(&vc_sym_req->base);
+
+       crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
+                                          req, err);
+}
+
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+       .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+       .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+       .algo = {
+               .base.cra_name          = "cbc(aes)",
+               .base.cra_driver_name   = "virtio_crypto_aes_cbc",
+               .base.cra_priority      = 150,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC |
+                                         CRYPTO_ALG_ALLOCATES_MEMORY,
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct virtio_crypto_skcipher_ctx),
+               .base.cra_module        = THIS_MODULE,
+               .init                   = virtio_crypto_skcipher_init,
+               .exit                   = virtio_crypto_skcipher_exit,
+               .setkey                 = virtio_crypto_skcipher_setkey,
+               .decrypt                = virtio_crypto_skcipher_decrypt,
+               .encrypt                = virtio_crypto_skcipher_encrypt,
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+       },
+} };
+
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
+{
+       int ret = 0;
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+               uint32_t service = virtio_crypto_algs[i].service;
+               uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+               if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_algs[i].active_devs == 0) {
+                       ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
+                       if (ret)
+                               goto unlock;
+               }
+
+               virtio_crypto_algs[i].active_devs++;
+               dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+                        virtio_crypto_algs[i].algo.base.cra_name);
+       }
+
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
+{
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+               uint32_t service = virtio_crypto_algs[i].service;
+               uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+               if (virtio_crypto_algs[i].active_devs == 0 ||
+                   !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_algs[i].active_devs == 1)
+                       crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
+
+               virtio_crypto_algs[i].active_devs--;
+       }
+
+       mutex_unlock(&algs_lock);
+}
index f5b885d..6f56dfd 100644 (file)
@@ -891,9 +891,7 @@ static int msgdma_probe(struct platform_device *pdev)
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64");
-               ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-               if (ret)
-                       goto fail;
+               goto fail;
        }
 
        msgdma_reset(mdev);
index 33baf15..e9c9bcb 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier:  GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
 // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
 
 /*
@@ -35,7 +35,7 @@
 /*
  * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
  * master data bus width up to 512 bits (for both AXI master interfaces), but
- * it depends on IP block configurarion.
+ * it depends on IP block configuration.
  */
 #define AXI_DMA_BUSWIDTHS                \
        (DMA_SLAVE_BUSWIDTH_1_BYTE      | \
@@ -1089,10 +1089,10 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
 
        u32 status, i;
 
-       /* Disable DMAC inerrupts. We'll enable them after processing chanels */
+       /* Disable DMAC interrupts. We'll enable them after processing channels */
        axi_dma_irq_disable(chip);
 
-       /* Poll, clear and process every chanel interrupt status */
+       /* Poll, clear and process every channel interrupt status */
        for (i = 0; i < dw->hdata->nr_channels; i++) {
                chan = &dw->chan[i];
                status = axi_chan_irq_read(chan);
index be69a0b..e9d5eb0 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier:  GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
 
 /*
index 7d57184..03e2f4e 100644 (file)
@@ -139,7 +139,7 @@ struct dpaa2_qdma_priv_per_prio {
 
 static struct soc_device_attribute soc_fixup_tuning[] = {
        { .family = "QorIQ LX2160A"},
-       { },
+       { /* sentinel */ }
 };
 
 /* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
index 97c87a7..43817ce 100644 (file)
@@ -30,7 +30,7 @@
 #define HISI_DMA_MODE                  0x217c
 #define HISI_DMA_OFFSET                        0x100
 
-#define HISI_DMA_MSI_NUM               30
+#define HISI_DMA_MSI_NUM               32
 #define HISI_DMA_CHAN_NUM              30
 #define HISI_DMA_Q_DEPTH_VAL           1024
 
index 573ad8b..3061fe8 100644 (file)
@@ -681,8 +681,13 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
                group->use_rdbuf_limit = false;
                group->rdbufs_allowed = 0;
                group->rdbufs_reserved = 0;
-               group->tc_a = -1;
-               group->tc_b = -1;
+               if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+                       group->tc_a = 1;
+                       group->tc_b = 1;
+               } else {
+                       group->tc_a = -1;
+                       group->tc_b = -1;
+               }
        }
 }
 
index 08a5f43..993a5dc 100644 (file)
@@ -604,8 +604,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        dev_dbg(dev, "Set DMA masks\n");
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (rc)
-               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                goto err;
 
index 75ec075..70c0aa9 100644 (file)
@@ -701,6 +701,11 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
        return 0;
 }
 
+static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
+{
+       return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
+}
+
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
        writel(BIT(channel), sdma->regs + SDMA_H_START);
@@ -842,7 +847,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
                */
 
                desc->chn_real_count = bd->mode.count;
-               bd->mode.status |= BD_DONE;
                bd->mode.count = desc->period_len;
                desc->buf_ptail = desc->buf_tail;
                desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
@@ -857,9 +861,21 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
                dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
                spin_lock(&sdmac->vc.lock);
 
+               /* Assign buffer ownership to SDMA */
+               bd->mode.status |= BD_DONE;
+
                if (error)
                        sdmac->status = old_status;
        }
+
+       /*
+        * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
+        * owned buffer is available (i.e. BD_DONE was set too late).
+        */
+       if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+               dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
+               sdma_enable_channel(sdmac->sdma, sdmac->channel);
+       }
 }
 
 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
@@ -876,9 +892,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
        for (i = 0; i < sdmac->desc->num_bd; i++) {
                bd = &sdmac->desc->bd[i];
 
-                if (bd->mode.status & (BD_DONE | BD_RROR))
+               if (bd->mode.status & (BD_DONE | BD_RROR))
                        error = -EIO;
-                sdmac->desc->chn_real_count += bd->mode.count;
+               sdmac->desc->chn_real_count += bd->mode.count;
        }
 
        if (error)
index 373b8da..5d707ff 100644 (file)
@@ -1364,8 +1364,6 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENOMEM;
 
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (err)
-               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                return err;
 
index 5e46e34..6b5e91f 100644 (file)
@@ -1686,8 +1686,8 @@ static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
 {
        struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
        struct ppc440spe_adma_desc_slot *alloc_start = NULL;
-       struct list_head chain = LIST_HEAD_INIT(chain);
        int slots_found, retry = 0;
+       LIST_HEAD(chain);
 
 
        BUG_ON(!num_slots || !slots_per_op);
index c9e52f6..91b93e8 100644 (file)
@@ -100,12 +100,17 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
                spin_lock_irqsave(&chan->vc.lock, flags);
 
                if (desc) {
-                       if (desc->status != DMA_ERROR)
-                               desc->status = DMA_COMPLETE;
-
-                       dma_cookie_complete(tx_desc);
-                       dma_descriptor_unmap(tx_desc);
-                       list_del(&desc->vd.node);
+                       if (desc->status != DMA_COMPLETE) {
+                               if (desc->status != DMA_ERROR)
+                                       desc->status = DMA_COMPLETE;
+
+                               dma_cookie_complete(tx_desc);
+                               dma_descriptor_unmap(tx_desc);
+                               list_del(&desc->vd.node);
+                       } else {
+                               /* Don't handle it twice */
+                               tx_desc = NULL;
+                       }
                }
 
                desc = pt_next_dma_desc(chan);
@@ -233,9 +238,14 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        struct pt_dma_desc *desc;
        unsigned long flags;
+       bool engine_is_idle = true;
 
        spin_lock_irqsave(&chan->vc.lock, flags);
 
+       desc = pt_next_dma_desc(chan);
+       if (desc)
+               engine_is_idle = false;
+
        vchan_issue_pending(&chan->vc);
 
        desc = pt_next_dma_desc(chan);
@@ -243,7 +253,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 
        /* If there was nothing active, start processing */
-       if (desc)
+       if (engine_is_idle)
                pt_cmd_callback(desc, 0);
 }
 
index 65d054b..51587cf 100644 (file)
@@ -838,9 +838,7 @@ static int hidma_probe(struct platform_device *pdev)
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64");
-               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-               if (rc)
-                       goto dmafree;
+               goto dmafree;
        }
 
        dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
index a462962..b35d705 100644 (file)
@@ -49,10 +49,10 @@ config RENESAS_USB_DMAC
          SoCs.
 
 config RZ_DMAC
-       tristate "Renesas RZ/G2L DMA Controller"
-       depends on ARCH_R9A07G044 || COMPILE_TEST
+       tristate "Renesas RZ/{G2L,V2L} DMA Controller"
+       depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST
        select RENESAS_DMA
        select DMA_VIRTUAL_CHANNELS
        help
          This driver supports the general purpose DMA controller found in the
-         Renesas RZ/G2L SoC variants.
+         Renesas RZ/{G2L,V2L} SoC variants.
index b26ed69..158e5e7 100644 (file)
@@ -115,10 +115,8 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
                ret = pm_runtime_get(schan->dev);
 
                spin_unlock_irq(&schan->chan_lock);
-               if (ret < 0) {
+               if (ret < 0)
                        dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
-                       pm_runtime_put(schan->dev);
-               }
 
                pm_runtime_barrier(schan->dev);
 
index 83a37a6..d2365fa 100644 (file)
@@ -1389,6 +1389,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
        dd->max_burst = STM32_DMA_MAX_BURST;
+       dd->max_sg_burst = STM32_DMA_ALIGNED_MAX_DATA_ITEMS;
        dd->descriptor_reuse = true;
        dd->dev = &pdev->dev;
        INIT_LIST_HEAD(&dd->channels);
index 1d4081a..d3a303f 100644 (file)
@@ -9,5 +9,6 @@ obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
                            k3-psil-j721e.o \
                            k3-psil-j7200.o \
                            k3-psil-am64.o \
-                           k3-psil-j721s2.o
+                           k3-psil-j721s2.o \
+                           k3-psil-am62.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
index 8c2f7eb..062bd9b 100644 (file)
@@ -315,7 +315,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
                if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
                        u32 mask;
-                       /* set corresponding bit for completetion Q 93 */
+                       /* set corresponding bit for completion Q 93 */
                        mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
                        /* not set all bits for queues less than Q 93 */
                        mask--;
@@ -703,7 +703,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
         * transfer descriptor followed by TD descriptor. Waiting seems not to
         * cause any difference.
         * RX seems to be thrown out right away. However once the TearDown
-        * descriptor gets through we are done. If we have seens the transfer
+        * descriptor gets through we are done. If we have seen the transfer
         * descriptor before the TD we fetch it from enqueue, it has to be
         * there waiting for us.
         */
@@ -747,7 +747,7 @@ static int cppi41_stop_chan(struct dma_chan *chan)
                struct cppi41_channel *cc, *_ct;
 
                /*
-                * channels might still be in the pendling list if
+                * channels might still be in the pending list if
                 * cppi41_dma_issue_pending() is called after
                 * cppi41_runtime_suspend() is called
                 */
index 08e47f4..3ea8ef7 100644 (file)
 
 /*
  * Max of 20 segments per channel to conserve PaRAM slots
- * Also note that MAX_NR_SG should be atleast the no.of periods
+ * Also note that MAX_NR_SG should be at least the no.of periods
  * that are required for ASoC, otherwise DMA prep calls will
  * fail. Today davinci-pcm is the only user of this driver and
- * requires atleast 17 slots, so we setup the default to 20.
+ * requires at least 17 slots, so we setup the default to 20.
  */
 #define MAX_NR_SG              20
 #define EDMA_MAX_SLOTS         MAX_NR_SG
@@ -976,7 +976,7 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
                 * and quotient respectively of the division of:
                 * (dma_length / acnt) by (SZ_64K -1). This is so
                 * that in case bcnt over flows, we have ccnt to use.
-                * Note: In A-sync tranfer only, bcntrld is used, but it
+                * Note: In A-sync transfer only, bcntrld is used, but it
                 * only applies for sg_dma_len(sg) >= SZ_64K.
                 * In this case, the best way adopted is- bccnt for the
                 * first frame will be the remainder below. Then for
@@ -1203,7 +1203,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
                 * slot2: the remaining amount of data after slot1.
                 *        ACNT = full_length - length1, length2 = ACNT
                 *
-                * When the full_length is multibple of 32767 one slot can be
+                * When the full_length is a multiple of 32767 one slot can be
                 * used to complete the transfer.
                 */
                width = array_size;
@@ -1814,7 +1814,7 @@ static void edma_issue_pending(struct dma_chan *chan)
  * This limit exists to avoid a possible infinite loop when waiting for proof
  * that a particular transfer is completed. This limit can be hit if there
  * are large bursts to/from slow devices or the CPU is never able to catch
- * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
+ * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
  * RX-FIFO, as many as 55 loops have been seen.
  */
 #define EDMA_MAX_TR_WAIT_LOOPS 1000
diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
new file mode 100644 (file)
index 0000000..d431e20
--- /dev/null
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_PKT(x)                                    \
+       {                                                       \
+               .thread_id = x,                                 \
+               .ep_config = {                                  \
+                       .ep_type = PSIL_EP_PDMA_XY,             \
+                       .mapped_channel_id = -1,                \
+                       .default_flow_id = -1,                  \
+                       .pkt_mode = 1,                          \
+               },                                              \
+       }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt)              \
+       {                                                       \
+               .thread_id = x,                                 \
+               .ep_config = {                                  \
+                       .ep_type = PSIL_EP_NATIVE,              \
+                       .pkt_mode = 1,                          \
+                       .needs_epib = 1,                        \
+                       .psd_size = 16,                         \
+                       .mapped_channel_id = ch,                \
+                       .flow_start = flow_base,                \
+                       .flow_num = flow_cnt,                   \
+                       .default_flow_id = flow_base,           \
+               },                                              \
+       }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx)        \
+       {                                                       \
+               .thread_id = x,                                 \
+               .ep_config = {                                  \
+                       .ep_type = PSIL_EP_NATIVE,              \
+                       .pkt_mode = 1,                          \
+                       .needs_epib = 1,                        \
+                       .psd_size = 64,                         \
+                       .mapped_channel_id = ch,                \
+                       .flow_start = flow_base,                \
+                       .flow_num = flow_cnt,                   \
+                       .default_flow_id = default_flow,        \
+                       .notdpkt = tx,                          \
+               },                                              \
+       }
+
+#define PSIL_PDMA_MCASP(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pdma_acc32 = 1,                \
+                       .pdma_burst = 1,                \
+               },                                      \
+       }
+
+#define PSIL_CSI2RX(x)                                 \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+               },                                      \
+       }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62_src_ep_map[] = {
+       /* SAUL */
+       PSIL_SAUL(0x7500, 20, 35, 8, 35, 0),
+       PSIL_SAUL(0x7501, 21, 35, 8, 36, 0),
+       PSIL_SAUL(0x7502, 22, 43, 8, 43, 0),
+       PSIL_SAUL(0x7503, 23, 43, 8, 44, 0),
+       /* PDMA_MAIN0 - SPI0-3 */
+       PSIL_PDMA_XY_PKT(0x4302),
+       PSIL_PDMA_XY_PKT(0x4303),
+       PSIL_PDMA_XY_PKT(0x4304),
+       PSIL_PDMA_XY_PKT(0x4305),
+       PSIL_PDMA_XY_PKT(0x4306),
+       PSIL_PDMA_XY_PKT(0x4307),
+       PSIL_PDMA_XY_PKT(0x4308),
+       PSIL_PDMA_XY_PKT(0x4309),
+       PSIL_PDMA_XY_PKT(0x430a),
+       PSIL_PDMA_XY_PKT(0x430b),
+       PSIL_PDMA_XY_PKT(0x430c),
+       PSIL_PDMA_XY_PKT(0x430d),
+       /* PDMA_MAIN1 - UART0-6 */
+       PSIL_PDMA_XY_PKT(0x4400),
+       PSIL_PDMA_XY_PKT(0x4401),
+       PSIL_PDMA_XY_PKT(0x4402),
+       PSIL_PDMA_XY_PKT(0x4403),
+       PSIL_PDMA_XY_PKT(0x4404),
+       PSIL_PDMA_XY_PKT(0x4405),
+       PSIL_PDMA_XY_PKT(0x4406),
+       /* PDMA_MAIN2 - MCASP0-2 */
+       PSIL_PDMA_MCASP(0x4500),
+       PSIL_PDMA_MCASP(0x4501),
+       PSIL_PDMA_MCASP(0x4502),
+       /* CPSW3G */
+       PSIL_ETHERNET(0x4600, 19, 19, 16),
+       /* CSI2RX */
+       PSIL_CSI2RX(0x4700),
+       PSIL_CSI2RX(0x4701),
+       PSIL_CSI2RX(0x4702),
+       PSIL_CSI2RX(0x4703),
+       PSIL_CSI2RX(0x4704),
+       PSIL_CSI2RX(0x4705),
+       PSIL_CSI2RX(0x4706),
+       PSIL_CSI2RX(0x4707),
+       PSIL_CSI2RX(0x4708),
+       PSIL_CSI2RX(0x4709),
+       PSIL_CSI2RX(0x470a),
+       PSIL_CSI2RX(0x470b),
+       PSIL_CSI2RX(0x470c),
+       PSIL_CSI2RX(0x470d),
+       PSIL_CSI2RX(0x470e),
+       PSIL_CSI2RX(0x470f),
+       PSIL_CSI2RX(0x4710),
+       PSIL_CSI2RX(0x4711),
+       PSIL_CSI2RX(0x4712),
+       PSIL_CSI2RX(0x4713),
+       PSIL_CSI2RX(0x4714),
+       PSIL_CSI2RX(0x4715),
+       PSIL_CSI2RX(0x4716),
+       PSIL_CSI2RX(0x4717),
+       PSIL_CSI2RX(0x4718),
+       PSIL_CSI2RX(0x4719),
+       PSIL_CSI2RX(0x471a),
+       PSIL_CSI2RX(0x471b),
+       PSIL_CSI2RX(0x471c),
+       PSIL_CSI2RX(0x471d),
+       PSIL_CSI2RX(0x471e),
+       PSIL_CSI2RX(0x471f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62_dst_ep_map[] = {
+       /* SAUL */
+       PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+       PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+       /* PDMA_MAIN0 - SPI0-3 */
+       PSIL_PDMA_XY_PKT(0xc302),
+       PSIL_PDMA_XY_PKT(0xc303),
+       PSIL_PDMA_XY_PKT(0xc304),
+       PSIL_PDMA_XY_PKT(0xc305),
+       PSIL_PDMA_XY_PKT(0xc306),
+       PSIL_PDMA_XY_PKT(0xc307),
+       PSIL_PDMA_XY_PKT(0xc308),
+       PSIL_PDMA_XY_PKT(0xc309),
+       PSIL_PDMA_XY_PKT(0xc30a),
+       PSIL_PDMA_XY_PKT(0xc30b),
+       PSIL_PDMA_XY_PKT(0xc30c),
+       PSIL_PDMA_XY_PKT(0xc30d),
+       /* PDMA_MAIN1 - UART0-6 */
+       PSIL_PDMA_XY_PKT(0xc400),
+       PSIL_PDMA_XY_PKT(0xc401),
+       PSIL_PDMA_XY_PKT(0xc402),
+       PSIL_PDMA_XY_PKT(0xc403),
+       PSIL_PDMA_XY_PKT(0xc404),
+       PSIL_PDMA_XY_PKT(0xc405),
+       PSIL_PDMA_XY_PKT(0xc406),
+       /* PDMA_MAIN2 - MCASP0-2 */
+       PSIL_PDMA_MCASP(0xc500),
+       PSIL_PDMA_MCASP(0xc501),
+       PSIL_PDMA_MCASP(0xc502),
+       /* CPSW3G */
+       PSIL_ETHERNET(0xc600, 19, 19, 8),
+       PSIL_ETHERNET(0xc601, 20, 27, 8),
+       PSIL_ETHERNET(0xc602, 21, 35, 8),
+       PSIL_ETHERNET(0xc603, 22, 43, 8),
+       PSIL_ETHERNET(0xc604, 23, 51, 8),
+       PSIL_ETHERNET(0xc605, 24, 59, 8),
+       PSIL_ETHERNET(0xc606, 25, 67, 8),
+       PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62_ep_map = {
+       .name = "am62",
+       .src = am62_src_ep_map,
+       .src_count = ARRAY_SIZE(am62_src_ep_map),
+       .dst = am62_dst_ep_map,
+       .dst_count = ARRAY_SIZE(am62_dst_ep_map),
+};
index e51e179..74fa9ec 100644 (file)
@@ -42,5 +42,6 @@ extern struct psil_ep_map j721e_ep_map;
 extern struct psil_ep_map j7200_ep_map;
 extern struct psil_ep_map am64_ep_map;
 extern struct psil_ep_map j721s2_ep_map;
+extern struct psil_ep_map am62_ep_map;
 
 #endif /* K3_PSIL_PRIV_H_ */
index 8867b4b..761a384 100644 (file)
@@ -22,6 +22,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
        { .family = "J7200", .data = &j7200_ep_map },
        { .family = "AM64X", .data = &am64_ep_map },
        { .family = "J721S2", .data = &j721s2_ep_map },
+       { .family = "AM62X", .data = &am62_ep_map },
        { /* sentinel */ }
 };
 
index d2d4cbe..2f0d2c6 100644 (file)
@@ -4375,6 +4375,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
        { .family = "J7200", .data = &j7200_soc_data },
        { .family = "AM64X", .data = &am64_soc_data },
        { .family = "J721S2", .data = &j721e_soc_data},
+       { .family = "AM62X", .data = &am64_soc_data },
        { /* sentinel */ }
 };
 
index 7cb577e..8e52a0d 100644 (file)
@@ -1442,7 +1442,7 @@ static int omap_dma_pause(struct dma_chan *chan)
         * A source-synchronised channel is one where the fetching of data is
         * under control of the device. In other words, a device-to-memory
         * transfer. So, a destination-synchronised channel (which would be a
-        * memory-to-device transfer) undergoes an abort if the the CCR_ENABLE
+        * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
         * bit is cleared.
         * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
         * aborts immediately after completion of current read/write
index 69854fd..416725c 100644 (file)
@@ -47,8 +47,9 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
 {
        struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
 
-       /* Only clear the OE bit here, requires a RMW. Prevents potential issue
-        * with OE and data getting to the physical pin at different times.
+       /*
+        * Only clear the OE bit here, requires a RMW. Prevents a potential issue
+        * with OE and DAT getting to the physical pin at different times.
         */
        return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
 }
@@ -60,9 +61,10 @@ static int ts4900_gpio_direction_output(struct gpio_chip *chip,
        unsigned int reg;
        int ret;
 
-       /* If changing from an input to an output, we need to first set the
-        * proper data bit to what is requested and then set OE bit. This
-        * prevents a glitch that can occur on the IO line
+       /*
+        * If changing from an input to an output, we need to first set the
+        * GPIO's DAT bit to what is requested and then set the OE bit. This
+        * prevents a glitch that can occur on the IO line.
         */
        regmap_read(priv->regmap, offset, &reg);
        if (!(reg & TS4900_GPIO_OE)) {
index b159e92..8e03614 100644 (file)
  * Actually, the following platforms have DIO support:
  *
  * TS-5500:
- *   Documentation: http://wiki.embeddedarm.com/wiki/TS-5500
+ *   Documentation: https://docs.embeddedts.com/TS-5500
  *   Blocks: DIO1, DIO2 and LCD port.
  *
  * TS-5600:
- *   Documentation: http://wiki.embeddedarm.com/wiki/TS-5600
+ *   Documentation: https://docs.embeddedts.com/TS-5600
  *   Blocks: LCD port (identical to TS-5500 LCD).
  */
 
index 7a67487..a95a7cb 100644 (file)
@@ -405,14 +405,25 @@ config HOLTEK_FF
          Say Y here if you have a Holtek On Line Grip based game controller
          and want to have force feedback support for it.
 
+config HID_VIVALDI_COMMON
+       tristate
+       help
+         ChromeOS Vivaldi HID parsing support library. This is a hidden
+         option so that drivers can use common code to parse the HID
+         descriptors for vivaldi function row keymap.
+
 config HID_GOOGLE_HAMMER
        tristate "Google Hammer Keyboard"
+       select HID_VIVALDI_COMMON
+       select INPUT_VIVALDIFMAP
        depends on USB_HID && LEDS_CLASS && CROS_EC
        help
        Say Y here if you have a Google Hammer device.
 
 config HID_VIVALDI
        tristate "Vivaldi Keyboard"
+       select HID_VIVALDI_COMMON
+       select INPUT_VIVALDIFMAP
        depends on HID
        help
          Say Y here if you want to enable support for Vivaldi keyboards.
index d5ce8d7..345ac55 100644 (file)
@@ -50,6 +50,7 @@ obj-$(CONFIG_HID_FT260)               += hid-ft260.o
 obj-$(CONFIG_HID_GEMBIRD)      += hid-gembird.o
 obj-$(CONFIG_HID_GFRM)         += hid-gfrm.o
 obj-$(CONFIG_HID_GLORIOUS)  += hid-glorious.o
+obj-$(CONFIG_HID_VIVALDI_COMMON) += hid-vivaldi-common.o
 obj-$(CONFIG_HID_GOOGLE_HAMMER)        += hid-google-hammer.o
 obj-$(CONFIG_HID_VIVALDI)      += hid-vivaldi.o
 obj-$(CONFIG_HID_GT683R)       += hid-gt683r.o
index ddbe0de..ff40f1e 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/acpi.h>
 #include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -25,6 +26,7 @@
 #include <asm/unaligned.h>
 
 #include "hid-ids.h"
+#include "hid-vivaldi-common.h"
 
 /*
  * C(hrome)B(ase)A(ttached)S(witch) - switch exported by Chrome EC and reporting
@@ -340,9 +342,9 @@ static int hammer_kbd_brightness_set_blocking(struct led_classdev *cdev,
 static int hammer_register_leds(struct hid_device *hdev)
 {
        struct hammer_kbd_leds *kbd_backlight;
-       int error;
 
-       kbd_backlight = kzalloc(sizeof(*kbd_backlight), GFP_KERNEL);
+       kbd_backlight = devm_kzalloc(&hdev->dev, sizeof(*kbd_backlight),
+                                    GFP_KERNEL);
        if (!kbd_backlight)
                return -ENOMEM;
 
@@ -356,26 +358,7 @@ static int hammer_register_leds(struct hid_device *hdev)
        /* Set backlight to 0% initially. */
        hammer_kbd_brightness_set_blocking(&kbd_backlight->cdev, 0);
 
-       error = led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
-       if (error)
-               goto err_free_mem;
-
-       hid_set_drvdata(hdev, kbd_backlight);
-       return 0;
-
-err_free_mem:
-       kfree(kbd_backlight);
-       return error;
-}
-
-static void hammer_unregister_leds(struct hid_device *hdev)
-{
-       struct hammer_kbd_leds *kbd_backlight = hid_get_drvdata(hdev);
-
-       if (kbd_backlight) {
-               led_classdev_unregister(&kbd_backlight->cdev);
-               kfree(kbd_backlight);
-       }
+       return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
 }
 
 #define HID_UP_GOOGLEVENDOR    0xffd10000
@@ -512,11 +495,23 @@ out:
        kfree(buf);
 }
 
+static void hammer_stop(void *hdev)
+{
+       hid_hw_stop(hdev);
+}
+
 static int hammer_probe(struct hid_device *hdev,
                        const struct hid_device_id *id)
 {
+       struct vivaldi_data *vdata;
        int error;
 
+       vdata = devm_kzalloc(&hdev->dev, sizeof(*vdata), GFP_KERNEL);
+       if (!vdata)
+               return -ENOMEM;
+
+       hid_set_drvdata(hdev, vdata);
+
        error = hid_parse(hdev);
        if (error)
                return error;
@@ -525,6 +520,10 @@ static int hammer_probe(struct hid_device *hdev,
        if (error)
                return error;
 
+       error = devm_add_action(&hdev->dev, hammer_stop, hdev);
+       if (error)
+               return error;
+
        /*
         * We always want to poll for, and handle tablet mode events from
         * devices that have folded usage, even when nobody has opened the input
@@ -577,15 +576,13 @@ static void hammer_remove(struct hid_device *hdev)
                spin_unlock_irqrestore(&cbas_ec_lock, flags);
        }
 
-       hammer_unregister_leds(hdev);
-
-       hid_hw_stop(hdev);
+       /* Unregistering LEDs and stopping the hardware is done via devm */
 }
 
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
-       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+       { HID_DEVICE(BUS_USB, HID_GROUP_VIVALDI,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
@@ -610,6 +607,8 @@ static struct hid_driver hammer_driver = {
        .id_table = hammer_devices,
        .probe = hammer_probe,
        .remove = hammer_remove,
+       .feature_mapping = vivaldi_feature_mapping,
+       .input_configured = vivaldi_input_configured,
        .input_mapping = hammer_input_mapping,
        .event = hammer_event,
 };
diff --git a/drivers/hid/hid-vivaldi-common.c b/drivers/hid/hid-vivaldi-common.c
new file mode 100644 (file)
index 0000000..8b3e515
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS HID Vivaldi keyboards
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "hid-vivaldi-common.h"
+
+#define MIN_FN_ROW_KEY 1
+#define MAX_FN_ROW_KEY VIVALDI_MAX_FUNCTION_ROW_KEYS
+#define HID_VD_FN_ROW_PHYSMAP 0x00000001
+#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
+
+/**
+ * vivaldi_feature_mapping - Fill out vivaldi keymap data exposed via HID
+ * @hdev: HID device to parse
+ * @field: HID field to parse
+ * @usage: HID usage to parse
+ *
+ * Note: this function assumes that driver data attached to @hdev contains an
+ * instance of &struct vivaldi_data at the very beginning.
+ */
+void vivaldi_feature_mapping(struct hid_device *hdev,
+                            struct hid_field *field, struct hid_usage *usage)
+{
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+       struct hid_report *report = field->report;
+       u8 *report_data, *buf;
+       u32 report_len;
+       unsigned int fn_key;
+       int ret;
+
+       if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
+           (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
+               return;
+
+       fn_key = usage->hid & HID_USAGE;
+       if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
+               return;
+
+       if (fn_key > data->num_function_row_keys)
+               data->num_function_row_keys = fn_key;
+
+       report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
+       if (!report_data)
+               return;
+
+       report_len = hid_report_len(report);
+       if (!report->id) {
+               /*
+                * hid_hw_raw_request() will stuff report ID (which will be 0)
+                * into the first byte of the buffer even for unnumbered
+                * reports, so we need to account for this to avoid getting
+                * -EOVERFLOW in return.
+                * Note that hid_alloc_report_buf() adds 7 bytes to the size
+                * so we can safely say that we have space for an extra byte.
+                */
+               report_len++;
+       }
+
+       ret = hid_hw_raw_request(hdev, report->id, report_data,
+                                report_len, HID_FEATURE_REPORT,
+                                HID_REQ_GET_REPORT);
+       if (ret < 0) {
+               dev_warn(&hdev->dev, "failed to fetch feature %d\n",
+                        field->report->id);
+               goto out;
+       }
+
+       if (!report->id) {
+               /*
+                * Undo the damage from hid_hw_raw_request() for unnumbered
+                * reports.
+                */
+               report_data++;
+               report_len--;
+       }
+
+       ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
+                                  report_len, 0);
+       if (ret) {
+               dev_warn(&hdev->dev, "failed to report feature %d\n",
+                        field->report->id);
+               goto out;
+       }
+
+       data->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
+               field->value[usage->usage_index];
+
+out:
+       kfree(buf);
+}
+EXPORT_SYMBOL_GPL(vivaldi_feature_mapping);
+
+static ssize_t function_row_physmap_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct hid_device *hdev = to_hid_device(dev);
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+       return vivaldi_function_row_physmap_show(data, buf);
+}
+
+static DEVICE_ATTR_RO(function_row_physmap);
+static struct attribute *vivaldi_sysfs_attrs[] = {
+       &dev_attr_function_row_physmap.attr,
+       NULL
+};
+
+static const struct attribute_group vivaldi_attribute_group = {
+       .attrs = vivaldi_sysfs_attrs,
+};
+
+/**
+ * vivaldi_input_configured - Complete initialization of device using vivaldi map
+ * @hdev: HID device to which vivaldi attributes should be attached
+ * @hidinput: HID input device (unused)
+ */
+int vivaldi_input_configured(struct hid_device *hdev,
+                            struct hid_input *hidinput)
+{
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+       if (!data->num_function_row_keys)
+               return 0;
+
+       return devm_device_add_group(&hdev->dev, &vivaldi_attribute_group);
+}
+EXPORT_SYMBOL_GPL(vivaldi_input_configured);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-vivaldi-common.h b/drivers/hid/hid-vivaldi-common.h
new file mode 100644 (file)
index 0000000..d42e82d
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _HID_VIVALDI_COMMON_H
+#define _HID_VIVALDI_COMMON_H
+
+struct hid_device;
+struct hid_field;
+struct hid_input;
+struct hid_usage;
+
+void vivaldi_feature_mapping(struct hid_device *hdev,
+                            struct hid_field *field, struct hid_usage *usage);
+
+int vivaldi_input_configured(struct hid_device *hdev,
+                            struct hid_input *hidinput);
+
+#endif /* _HID_VIVALDI_COMMON_H */
index 42ceb20..3a97912 100644 (file)
@@ -8,48 +8,11 @@
 
 #include <linux/device.h>
 #include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/sysfs.h>
 
-#define MIN_FN_ROW_KEY 1
-#define MAX_FN_ROW_KEY 24
-#define HID_VD_FN_ROW_PHYSMAP 0x00000001
-#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
-
-struct vivaldi_data {
-       u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1];
-       int max_function_row_key;
-};
-
-static ssize_t function_row_physmap_show(struct device *dev,
-                                        struct device_attribute *attr,
-                                        char *buf)
-{
-       struct hid_device *hdev = to_hid_device(dev);
-       struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
-       ssize_t size = 0;
-       int i;
-
-       if (!drvdata->max_function_row_key)
-               return 0;
-
-       for (i = 0; i < drvdata->max_function_row_key; i++)
-               size += sprintf(buf + size, "%02X ",
-                               drvdata->function_row_physmap[i]);
-       size += sprintf(buf + size, "\n");
-       return size;
-}
-
-static DEVICE_ATTR_RO(function_row_physmap);
-static struct attribute *sysfs_attrs[] = {
-       &dev_attr_function_row_physmap.attr,
-       NULL
-};
-
-static const struct attribute_group input_attribute_group = {
-       .attrs = sysfs_attrs
-};
+#include "hid-vivaldi-common.h"
 
 static int vivaldi_probe(struct hid_device *hdev,
                         const struct hid_device_id *id)
@@ -70,86 +33,8 @@ static int vivaldi_probe(struct hid_device *hdev,
        return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 }
 
-static void vivaldi_feature_mapping(struct hid_device *hdev,
-                                   struct hid_field *field,
-                                   struct hid_usage *usage)
-{
-       struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
-       struct hid_report *report = field->report;
-       int fn_key;
-       int ret;
-       u32 report_len;
-       u8 *report_data, *buf;
-
-       if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
-           (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
-               return;
-
-       fn_key = (usage->hid & HID_USAGE);
-       if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
-               return;
-       if (fn_key > drvdata->max_function_row_key)
-               drvdata->max_function_row_key = fn_key;
-
-       report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
-       if (!report_data)
-               return;
-
-       report_len = hid_report_len(report);
-       if (!report->id) {
-               /*
-                * hid_hw_raw_request() will stuff report ID (which will be 0)
-                * into the first byte of the buffer even for unnumbered
-                * reports, so we need to account for this to avoid getting
-                * -EOVERFLOW in return.
-                * Note that hid_alloc_report_buf() adds 7 bytes to the size
-                * so we can safely say that we have space for an extra byte.
-                */
-               report_len++;
-       }
-
-       ret = hid_hw_raw_request(hdev, report->id, report_data,
-                                report_len, HID_FEATURE_REPORT,
-                                HID_REQ_GET_REPORT);
-       if (ret < 0) {
-               dev_warn(&hdev->dev, "failed to fetch feature %d\n",
-                        field->report->id);
-               goto out;
-       }
-
-       if (!report->id) {
-               /*
-                * Undo the damage from hid_hw_raw_request() for unnumbered
-                * reports.
-                */
-               report_data++;
-               report_len--;
-       }
-
-       ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
-                                  report_len, 0);
-       if (ret) {
-               dev_warn(&hdev->dev, "failed to report feature %d\n",
-                        field->report->id);
-               goto out;
-       }
-
-       drvdata->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
-           field->value[usage->usage_index];
-
-out:
-       kfree(buf);
-}
-
-static int vivaldi_input_configured(struct hid_device *hdev,
-                                   struct hid_input *hidinput)
-{
-       return devm_device_add_group(&hdev->dev, &input_attribute_group);
-}
-
 static const struct hid_device_id vivaldi_table[] = {
-       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID,
-                    HID_ANY_ID) },
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID, HID_ANY_ID) },
        { }
 };
 
index d221fc9..22e2ffb 100644 (file)
@@ -93,8 +93,7 @@ static int sprd_hwspinlock_probe(struct platform_device *pdev)
                return -ENODEV;
 
        sprd_hwlock = devm_kzalloc(&pdev->dev,
-                                  sizeof(struct sprd_hwspinlock_dev) +
-                                  SPRD_HWLOCKS_NUM * sizeof(*lock),
+                                  struct_size(sprd_hwlock, bank.lock, SPRD_HWLOCKS_NUM),
                                   GFP_KERNEL);
        if (!sprd_hwlock)
                return -ENOMEM;
index 5bd11a7..bb5c7e5 100644 (file)
@@ -73,15 +73,13 @@ static int stm32_hwspinlock_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct stm32_hwspinlock *hw;
        void __iomem *io_base;
-       size_t array_size;
        int i, ret;
 
        io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(io_base))
                return PTR_ERR(io_base);
 
-       array_size = STM32_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
-       hw = devm_kzalloc(dev, sizeof(*hw) + array_size, GFP_KERNEL);
+       hw = devm_kzalloc(dev, struct_size(hw, bank.lock, STM32_MUTEX_NUM_LOCKS), GFP_KERNEL);
        if (!hw)
                return -ENOMEM;
 
index dfe18dc..7850287 100644 (file)
@@ -609,7 +609,7 @@ static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev)
 
 static struct i2c_dev_desc *
 i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
-                        const struct i2c_dev_boardinfo *boardinfo)
+                        u16 addr, u8 lvr)
 {
        struct i2c_dev_desc *dev;
 
@@ -618,9 +618,8 @@ i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
                return ERR_PTR(-ENOMEM);
 
        dev->common.master = master;
-       dev->boardinfo = boardinfo;
-       dev->addr = boardinfo->base.addr;
-       dev->lvr = boardinfo->lvr;
+       dev->addr = addr;
+       dev->lvr = lvr;
 
        return dev;
 }
@@ -694,7 +693,7 @@ i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master,
        struct i2c_dev_desc *dev;
 
        i3c_bus_for_each_i2cdev(&master->bus, dev) {
-               if (dev->boardinfo->base.addr == addr)
+               if (dev->addr == addr)
                        return dev;
        }
 
@@ -1689,7 +1688,9 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
                                             i2cboardinfo->base.addr,
                                             I3C_ADDR_SLOT_I2C_DEV);
 
-               i2cdev = i3c_master_alloc_i2c_dev(master, i2cboardinfo);
+               i2cdev = i3c_master_alloc_i2c_dev(master,
+                                                 i2cboardinfo->base.addr,
+                                                 i2cboardinfo->lvr);
                if (IS_ERR(i2cdev)) {
                        ret = PTR_ERR(i2cdev);
                        goto err_detach_devs;
@@ -2166,15 +2167,127 @@ static u32 i3c_master_i2c_funcs(struct i2c_adapter *adapter)
        return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
 }
 
+static u8 i3c_master_i2c_get_lvr(struct i2c_client *client)
+{
+       /* Fall back to no spike filters and FM bus mode. */
+       u8 lvr = I3C_LVR_I2C_INDEX(2) | I3C_LVR_I2C_FM_MODE;
+
+       if (client->dev.of_node) {
+               u32 reg[3];
+
+               if (!of_property_read_u32_array(client->dev.of_node, "reg",
+                                               reg, ARRAY_SIZE(reg)))
+                       lvr = reg[2];
+       }
+
+       return lvr;
+}
+
+static int i3c_master_i2c_attach(struct i2c_adapter *adap, struct i2c_client *client)
+{
+       struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+       enum i3c_addr_slot_status status;
+       struct i2c_dev_desc *i2cdev;
+       int ret;
+
+       /* Already added by board info? */
+       if (i3c_master_find_i2c_dev_by_addr(master, client->addr))
+               return 0;
+
+       status = i3c_bus_get_addr_slot_status(&master->bus, client->addr);
+       if (status != I3C_ADDR_SLOT_FREE)
+               return -EBUSY;
+
+       i3c_bus_set_addr_slot_status(&master->bus, client->addr,
+                                    I3C_ADDR_SLOT_I2C_DEV);
+
+       i2cdev = i3c_master_alloc_i2c_dev(master, client->addr,
+                                         i3c_master_i2c_get_lvr(client));
+       if (IS_ERR(i2cdev)) {
+               ret = PTR_ERR(i2cdev);
+               goto out_clear_status;
+       }
+
+       ret = i3c_master_attach_i2c_dev(master, i2cdev);
+       if (ret)
+               goto out_free_dev;
+
+       return 0;
+
+out_free_dev:
+       i3c_master_free_i2c_dev(i2cdev);
+out_clear_status:
+       i3c_bus_set_addr_slot_status(&master->bus, client->addr,
+                                    I3C_ADDR_SLOT_FREE);
+
+       return ret;
+}
+
+static int i3c_master_i2c_detach(struct i2c_adapter *adap, struct i2c_client *client)
+{
+       struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+       struct i2c_dev_desc *dev;
+
+       dev = i3c_master_find_i2c_dev_by_addr(master, client->addr);
+       if (!dev)
+               return -ENODEV;
+
+       i3c_master_detach_i2c_dev(dev);
+       i3c_bus_set_addr_slot_status(&master->bus, dev->addr,
+                                    I3C_ADDR_SLOT_FREE);
+       i3c_master_free_i2c_dev(dev);
+
+       return 0;
+}
+
 static const struct i2c_algorithm i3c_master_i2c_algo = {
        .master_xfer = i3c_master_i2c_adapter_xfer,
        .functionality = i3c_master_i2c_funcs,
 };
 
+static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action,
+                                void *data)
+{
+       struct i2c_adapter *adap;
+       struct i2c_client *client;
+       struct device *dev = data;
+       struct i3c_master_controller *master;
+       int ret;
+
+       if (dev->type != &i2c_client_type)
+               return 0;
+
+       client = to_i2c_client(dev);
+       adap = client->adapter;
+
+       if (adap->algo != &i3c_master_i2c_algo)
+               return 0;
+
+       master = i2c_adapter_to_i3c_master(adap);
+
+       i3c_bus_maintenance_lock(&master->bus);
+       switch (action) {
+       case BUS_NOTIFY_ADD_DEVICE:
+               ret = i3c_master_i2c_attach(adap, client);
+               break;
+       case BUS_NOTIFY_DEL_DEVICE:
+               ret = i3c_master_i2c_detach(adap, client);
+               break;
+       }
+       i3c_bus_maintenance_unlock(&master->bus);
+
+       return ret;
+}
+
+static struct notifier_block i2cdev_notifier = {
+       .notifier_call = i3c_i2c_notifier_call,
+};
+
 static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
 {
        struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
        struct i2c_dev_desc *i2cdev;
+       struct i2c_dev_boardinfo *i2cboardinfo;
        int ret;
 
        adap->dev.parent = master->dev.parent;
@@ -2194,8 +2307,13 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
         * We silently ignore failures here. The bus should keep working
         * correctly even if one or more i2c devices are not registered.
         */
-       i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
-               i2cdev->dev = i2c_new_client_device(adap, &i2cdev->boardinfo->base);
+       list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
+               i2cdev = i3c_master_find_i2c_dev_by_addr(master,
+                                                        i2cboardinfo->base.addr);
+               if (WARN_ON(!i2cdev))
+                       continue;
+               i2cdev->dev = i2c_new_client_device(adap, &i2cboardinfo->base);
+       }
 
        return 0;
 }
@@ -2697,12 +2815,27 @@ void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
 
 static int __init i3c_init(void)
 {
-       return bus_register(&i3c_bus_type);
+       int res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
+
+       if (res)
+               return res;
+
+       res = bus_register(&i3c_bus_type);
+       if (res)
+               goto out_unreg_notifier;
+
+       return 0;
+
+out_unreg_notifier:
+       bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
+
+       return res;
 }
 subsys_initcall(i3c_init);
 
 static void __exit i3c_exit(void)
 {
+       bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
        idr_destroy(&i3c_bus_idr);
        bus_unregister(&i3c_bus_type);
 }
index 5baebf6..e2752f7 100644 (file)
@@ -77,6 +77,13 @@ config INPUT_MATRIXKMAP
          To compile this driver as a module, choose M here: the
          module will be called matrix-keymap.
 
+config INPUT_VIVALDIFMAP
+       tristate
+       help
+         ChromeOS Vivaldi keymap support library. This is a hidden
+         option so that drivers can use common code to parse and
+         expose the vivaldi function row keymap.
+
 comment "Userland interfaces"
 
 config INPUT_MOUSEDEV
index 037cc59..2266c7d 100644 (file)
@@ -12,6 +12,7 @@ input-core-y += touchscreen.o
 obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
 obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
 obj-$(CONFIG_INPUT_MATRIXKMAP) += matrix-keymap.o
+obj-$(CONFIG_INPUT_VIVALDIFMAP)        += vivaldi-fmap.o
 
 obj-$(CONFIG_INPUT_LEDS)       += input-leds.o
 obj-$(CONFIG_INPUT_MOUSEDEV)   += mousedev.o
index c3139bc..e5a668c 100644 (file)
@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
 
 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
 
+static const unsigned int input_max_code[EV_CNT] = {
+       [EV_KEY] = KEY_MAX,
+       [EV_REL] = REL_MAX,
+       [EV_ABS] = ABS_MAX,
+       [EV_MSC] = MSC_MAX,
+       [EV_SW] = SW_MAX,
+       [EV_LED] = LED_MAX,
+       [EV_SND] = SND_MAX,
+       [EV_FF] = FF_MAX,
+};
+
 static inline int is_event_supported(unsigned int code,
                                     unsigned long *bm, unsigned int max)
 {
@@ -511,6 +522,9 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
 {
        struct input_absinfo *absinfo;
 
+       __set_bit(EV_ABS, dev->evbit);
+       __set_bit(axis, dev->absbit);
+
        input_alloc_absinfo(dev);
        if (!dev->absinfo)
                return;
@@ -520,12 +534,45 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
        absinfo->maximum = max;
        absinfo->fuzz = fuzz;
        absinfo->flat = flat;
-
-       __set_bit(EV_ABS, dev->evbit);
-       __set_bit(axis, dev->absbit);
 }
 EXPORT_SYMBOL(input_set_abs_params);
 
+/**
+ * input_copy_abs - Copy absinfo from one input_dev to another
+ * @dst: Destination input device to copy the abs settings to
+ * @dst_axis: ABS_* value selecting the destination axis
+ * @src: Source input device to copy the abs settings from
+ * @src_axis: ABS_* value selecting the source axis
+ *
+ * Set absinfo for the selected destination axis by copying it from
+ * the specified source input device's source axis.
+ * This is useful to e.g. setup a pen/stylus input-device for combined
+ * touchscreen/pen hardware where the pen uses the same coordinates as
+ * the touchscreen.
+ */
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+                   const struct input_dev *src, unsigned int src_axis)
+{
+       /* src must have EV_ABS and src_axis set */
+       if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
+                     test_bit(src_axis, src->absbit))))
+               return;
+
+       /*
+        * input_alloc_absinfo() may have failed for the source. Our caller is
+        * expected to catch this when registering the input devices, which may
+        * happen after the input_copy_abs() call.
+        */
+       if (!src->absinfo)
+               return;
+
+       input_set_capability(dst, EV_ABS, dst_axis);
+       if (!dst->absinfo)
+               return;
+
+       dst->absinfo[dst_axis] = src->absinfo[src_axis];
+}
+EXPORT_SYMBOL(input_copy_abs);
 
 /**
  * input_grab_device - grabs device for exclusive use
@@ -2074,6 +2121,14 @@ EXPORT_SYMBOL(input_get_timestamp);
  */
 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
 {
+       if (type < EV_CNT && input_max_code[type] &&
+           code > input_max_code[type]) {
+               pr_err("%s: invalid code %u for type %u\n", __func__, code,
+                      type);
+               dump_stack();
+               return;
+       }
+
        switch (type) {
        case EV_KEY:
                __set_bit(code, dev->keybit);
@@ -2085,9 +2140,6 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 
        case EV_ABS:
                input_alloc_absinfo(dev);
-               if (!dev->absinfo)
-                       return;
-
                __set_bit(code, dev->absbit);
                break;
 
@@ -2285,12 +2337,6 @@ int input_register_device(struct input_dev *dev)
        /* KEY_RESERVED is not supposed to be transmitted to userspace. */
        __clear_bit(KEY_RESERVED, dev->keybit);
 
-       /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
-       if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
-               __clear_bit(BTN_RIGHT, dev->keybit);
-               __clear_bit(BTN_MIDDLE, dev->keybit);
-       }
-
        /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
        input_cleanse_bitmasks(dev);
 
index 592c95b..e10d57b 100644 (file)
@@ -123,7 +123,7 @@ static void adi_read_packet(struct adi_port *port)
 {
        struct adi *adi = port->adi;
        struct gameport *gameport = port->gameport;
-       unsigned char u, v, w, x, z;
+       unsigned char u, v, w, x;
        int t[2], s[2], i;
        unsigned long flags;
 
@@ -136,7 +136,7 @@ static void adi_read_packet(struct adi_port *port)
        local_irq_save(flags);
 
        gameport_trigger(gameport);
-       v = z = gameport_read(gameport);
+       v = gameport_read(gameport);
 
        do {
                u = v;
index 4c914f7..18190b5 100644 (file)
@@ -131,7 +131,7 @@ static const struct xpad_device {
        { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
        { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
        { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
-       { 0x045e, 0x0b12, "Microsoft Xbox One X pad", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+       { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
        { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
        { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
        { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
index 9417ee0..4ea79db 100644 (file)
@@ -103,6 +103,7 @@ config KEYBOARD_ATKBD
        select SERIO_LIBPS2
        select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
        select SERIO_GSCPS2 if GSC
+       select INPUT_VIVALDIFMAP
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
          you'll need this, unless you have a different type keyboard (USB, ADB
@@ -749,6 +750,7 @@ config KEYBOARD_XTKBD
 config KEYBOARD_CROS_EC
        tristate "ChromeOS EC keyboard"
        select INPUT_MATRIXKMAP
+       select INPUT_VIVALDIFMAP
        depends on CROS_EC
        help
          Say Y here to enable the matrix keyboard used by ChromeOS devices
@@ -779,6 +781,18 @@ config KEYBOARD_BCM
          To compile this driver as a module, choose M here: the
          module will be called bcm-keypad.
 
+config KEYBOARD_MT6779
+       tristate "MediaTek Keypad Support"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       select REGMAP_MMIO
+       select INPUT_MATRIXKMAP
+       help
+         Say Y here if you want to use the keypad on MediaTek SoCs.
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mt6779-keypad.
+
 config KEYBOARD_MTK_PMIC
        tristate "MediaTek PMIC keys support"
        depends on MFD_MT6397
index e3c8648..721936e 100644 (file)
@@ -44,6 +44,7 @@ obj-$(CONFIG_KEYBOARD_MATRIX)         += matrix_keypad.o
 obj-$(CONFIG_KEYBOARD_MAX7359)         += max7359_keypad.o
 obj-$(CONFIG_KEYBOARD_MCS)             += mcs_touchkey.o
 obj-$(CONFIG_KEYBOARD_MPR121)          += mpr121_touchkey.o
+obj-$(CONFIG_KEYBOARD_MT6779)          += mt6779-keypad.o
 obj-$(CONFIG_KEYBOARD_MTK_PMIC)        += mtk-pmic-keys.o
 obj-$(CONFIG_KEYBOARD_NEWTON)          += newtonkbd.o
 obj-$(CONFIG_KEYBOARD_NOMADIK)         += nomadik-ske-keypad.o
index fbdef95..d413123 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/serio.h>
 #include <linux/workqueue.h>
 #include <linux/libps2.h>
@@ -64,8 +65,6 @@ static bool atkbd_terminal;
 module_param_named(terminal, atkbd_terminal, bool, 0);
 MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
 
-#define MAX_FUNCTION_ROW_KEYS  24
-
 #define SCANCODE(keymap)       ((keymap >> 16) & 0xFFFF)
 #define KEYCODE(keymap)                (keymap & 0xFFFF)
 
@@ -237,8 +236,7 @@ struct atkbd {
        /* Serializes reconnect(), attr->set() and event work */
        struct mutex mutex;
 
-       u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS];
-       int num_function_row_keys;
+       struct vivaldi_data vdata;
 };
 
 /*
@@ -308,17 +306,7 @@ static struct attribute *atkbd_attributes[] = {
 
 static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
 {
-       ssize_t size = 0;
-       int i;
-
-       if (!atkbd->num_function_row_keys)
-               return 0;
-
-       for (i = 0; i < atkbd->num_function_row_keys; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ",
-                                 atkbd->function_row_physmap[i]);
-       size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
-       return size;
+       return vivaldi_function_row_physmap_show(&atkbd->vdata, buf);
 }
 
 static umode_t atkbd_attr_is_visible(struct kobject *kobj,
@@ -329,7 +317,7 @@ static umode_t atkbd_attr_is_visible(struct kobject *kobj,
        struct atkbd *atkbd = serio_get_drvdata(serio);
 
        if (attr == &atkbd_attr_function_row_physmap.attr &&
-           !atkbd->num_function_row_keys)
+           !atkbd->vdata.num_function_row_keys)
                return 0;
 
        return attr->mode;
@@ -1206,10 +1194,11 @@ static void atkbd_parse_fwnode_data(struct serio *serio)
 
        /* Parse "function-row-physmap" property */
        n = device_property_count_u32(dev, "function-row-physmap");
-       if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS &&
+       if (n > 0 && n <= VIVALDI_MAX_FUNCTION_ROW_KEYS &&
            !device_property_read_u32_array(dev, "function-row-physmap",
-                                           atkbd->function_row_physmap, n)) {
-               atkbd->num_function_row_keys = n;
+                                           atkbd->vdata.function_row_physmap,
+                                           n)) {
+               atkbd->vdata.num_function_row_keys = n;
                dev_dbg(dev, "FW reported %d function-row key locations\n", n);
        }
 }
index fc02c54..6534dfc 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/notifier.h>
@@ -27,8 +28,6 @@
 
 #include <asm/unaligned.h>
 
-#define MAX_NUM_TOP_ROW_KEYS   15
-
 /**
  * struct cros_ec_keyb - Structure representing EC keyboard device
  *
@@ -44,9 +43,7 @@
  * @idev: The input device for the matrix keys.
  * @bs_idev: The input device for non-matrix buttons and switches (or NULL).
  * @notifier: interrupt event notifier for transport devices
- * @function_row_physmap: An array of the encoded rows/columns for the top
- *                        row function keys, in an order from left to right
- * @num_function_row_keys: The number of top row keys in a custom keyboard
+ * @vdata: vivaldi function row data
  */
 struct cros_ec_keyb {
        unsigned int rows;
@@ -64,8 +61,7 @@ struct cros_ec_keyb {
        struct input_dev *bs_idev;
        struct notifier_block notifier;
 
-       u16 function_row_physmap[MAX_NUM_TOP_ROW_KEYS];
-       size_t num_function_row_keys;
+       struct vivaldi_data vdata;
 };
 
 /**
@@ -537,9 +533,9 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
        int err;
        struct property *prop;
        const __be32 *p;
-       u16 *physmap;
+       u32 *physmap;
        u32 key_pos;
-       int row, col;
+       unsigned int row, col, scancode, n_physmap;
 
        err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
        if (err)
@@ -591,20 +587,21 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
        ckdev->idev = idev;
        cros_ec_keyb_compute_valid_keys(ckdev);
 
-       physmap = ckdev->function_row_physmap;
+       physmap = ckdev->vdata.function_row_physmap;
+       n_physmap = 0;
        of_property_for_each_u32(dev->of_node, "function-row-physmap",
                                 prop, p, key_pos) {
-               if (ckdev->num_function_row_keys == MAX_NUM_TOP_ROW_KEYS) {
+               if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
                        dev_warn(dev, "Only support up to %d top row keys\n",
-                                MAX_NUM_TOP_ROW_KEYS);
+                                VIVALDI_MAX_FUNCTION_ROW_KEYS);
                        break;
                }
                row = KEY_ROW(key_pos);
                col = KEY_COL(key_pos);
-               *physmap = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
-               physmap++;
-               ckdev->num_function_row_keys++;
+               scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+               physmap[n_physmap++] = scancode;
        }
+       ckdev->vdata.num_function_row_keys = n_physmap;
 
        err = input_register_device(ckdev->idev);
        if (err) {
@@ -619,18 +616,10 @@ static ssize_t function_row_physmap_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       ssize_t size = 0;
-       int i;
-       struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
-       u16 *physmap = ckdev->function_row_physmap;
-
-       for (i = 0; i < ckdev->num_function_row_keys; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size,
-                                 "%s%02X", size ? " " : "", physmap[i]);
-       if (size)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+       const struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+       const struct vivaldi_data *data = &ckdev->vdata;
 
-       return size;
+       return vivaldi_function_row_physmap_show(data, buf);
 }
 
 static DEVICE_ATTR_RO(function_row_physmap);
@@ -648,7 +637,7 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
        struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
 
        if (attr == &dev_attr_function_row_physmap.attr &&
-           !ckdev->num_function_row_keys)
+           !ckdev->vdata.num_function_row_keys)
                return 0;
 
        return attr->mode;
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
new file mode 100644 (file)
index 0000000..0dbbddc
--- /dev/null
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author Fengping Yu <fengping.yu@mediatek.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MTK_KPD_NAME           "mt6779-keypad"
+#define MTK_KPD_MEM            0x0004
+#define MTK_KPD_DEBOUNCE       0x0018
+#define MTK_KPD_DEBOUNCE_MASK  GENMASK(13, 0)
+#define MTK_KPD_DEBOUNCE_MAX_MS        256
+#define MTK_KPD_NUM_MEMS       5
+#define MTK_KPD_NUM_BITS       136     /* 4*32+8 MEM5 only use 8 BITS */
+
+struct mt6779_keypad {
+       struct regmap *regmap;
+       struct input_dev *input_dev;
+       struct clk *clk;
+       void __iomem *base;
+       u32 n_rows;
+       u32 n_cols;
+       DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS);
+};
+
+static const struct regmap_config mt6779_keypad_regmap_cfg = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = sizeof(u32),
+       .max_register = 36,
+};
+
+static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
+{
+       struct mt6779_keypad *keypad = dev_id;
+       const unsigned short *keycode = keypad->input_dev->keycode;
+       DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
+       DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
+       unsigned int bit_nr;
+       unsigned int row, col;
+       unsigned int scancode;
+       unsigned int row_shift = get_count_order(keypad->n_cols);
+       bool pressed;
+
+       regmap_bulk_read(keypad->regmap, MTK_KPD_MEM,
+                        new_state, MTK_KPD_NUM_MEMS);
+
+       bitmap_xor(change, new_state, keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+       for_each_set_bit(bit_nr, change, MTK_KPD_NUM_BITS) {
+               /*
+                * Registers are 32bits, but only bits [15:0] are used to
+                * indicate key status.
+                */
+               if (bit_nr % 32 >= 16)
+                       continue;
+
+               row = bit_nr / 32;
+               col = bit_nr % 32;
+               scancode = MATRIX_SCAN_CODE(row, col, row_shift);
+               /* 1: not pressed, 0: pressed */
+               pressed = !test_bit(bit_nr, new_state);
+               dev_dbg(&keypad->input_dev->dev, "%s",
+                       pressed ? "pressed" : "released");
+
+               input_event(keypad->input_dev, EV_MSC, MSC_SCAN, scancode);
+               input_report_key(keypad->input_dev, keycode[scancode], pressed);
+               input_sync(keypad->input_dev);
+
+               dev_dbg(&keypad->input_dev->dev,
+                       "report Linux keycode = %d\n", keycode[scancode]);
+       }
+
+       bitmap_copy(keypad->keymap_state, new_state, MTK_KPD_NUM_BITS);
+
+       return IRQ_HANDLED;
+}
+
+static void mt6779_keypad_clk_disable(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
+static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
+{
+       struct mt6779_keypad *keypad;
+       int irq;
+       u32 debounce;
+       bool wakeup;
+       int error;
+
+       keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
+       if (!keypad)
+               return -ENOMEM;
+
+       keypad->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(keypad->base))
+               return PTR_ERR(keypad->base);
+
+       keypad->regmap = devm_regmap_init_mmio(&pdev->dev, keypad->base,
+                                              &mt6779_keypad_regmap_cfg);
+       if (IS_ERR(keypad->regmap)) {
+               dev_err(&pdev->dev,
+                       "regmap init failed:%pe\n", keypad->regmap);
+               return PTR_ERR(keypad->regmap);
+       }
+
+       bitmap_fill(keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+       keypad->input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!keypad->input_dev) {
+               dev_err(&pdev->dev, "Failed to allocate input dev\n");
+               return -ENOMEM;
+       }
+
+       keypad->input_dev->name = MTK_KPD_NAME;
+       keypad->input_dev->id.bustype = BUS_HOST;
+
+       error = matrix_keypad_parse_properties(&pdev->dev, &keypad->n_rows,
+                                              &keypad->n_cols);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to parse keypad params\n");
+               return error;
+       }
+
+       if (device_property_read_u32(&pdev->dev, "debounce-delay-ms",
+                                    &debounce))
+               debounce = 16;
+
+       if (debounce > MTK_KPD_DEBOUNCE_MAX_MS) {
+               dev_err(&pdev->dev,
+                       "Debounce time exceeds the maximum allowed time %dms\n",
+                       MTK_KPD_DEBOUNCE_MAX_MS);
+               return -EINVAL;
+       }
+
+       wakeup = device_property_read_bool(&pdev->dev, "wakeup-source");
+
+       dev_dbg(&pdev->dev, "n_row=%d n_col=%d debounce=%d\n",
+               keypad->n_rows, keypad->n_cols, debounce);
+
+       error = matrix_keypad_build_keymap(NULL, NULL,
+                                          keypad->n_rows, keypad->n_cols,
+                                          NULL, keypad->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to build keymap\n");
+               return error;
+       }
+
+       input_set_capability(keypad->input_dev, EV_MSC, MSC_SCAN);
+
+       regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
+                    (debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+
+       keypad->clk = devm_clk_get(&pdev->dev, "kpd");
+       if (IS_ERR(keypad->clk))
+               return PTR_ERR(keypad->clk);
+
+       error = clk_prepare_enable(keypad->clk);
+       if (error) {
+               dev_err(&pdev->dev, "cannot prepare/enable keypad clock\n");
+               return error;
+       }
+
+       error = devm_add_action_or_reset(&pdev->dev, mt6779_keypad_clk_disable,
+                                        keypad->clk);
+       if (error)
+               return error;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       error = devm_request_threaded_irq(&pdev->dev, irq,
+                                         NULL, mt6779_keypad_irq_handler,
+                                         IRQF_ONESHOT, MTK_KPD_NAME, keypad);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to request IRQ#%d: %d\n",
+                       irq, error);
+               return error;
+       }
+
+       error = input_register_device(keypad->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to register device\n");
+               return error;
+       }
+
+       error = device_init_wakeup(&pdev->dev, wakeup);
+       if (error)
+               dev_warn(&pdev->dev, "device_init_wakeup() failed: %d\n",
+                        error);
+
+       return 0;
+}
+
+static const struct of_device_id mt6779_keypad_of_match[] = {
+       { .compatible = "mediatek,mt6779-keypad" },
+       { .compatible = "mediatek,mt6873-keypad" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver mt6779_keypad_pdrv = {
+       .probe = mt6779_keypad_pdrv_probe,
+       .driver = {
+                  .name = MTK_KPD_NAME,
+                  .of_match_table = mt6779_keypad_of_match,
+       },
+};
+module_platform_driver(mt6779_keypad_pdrv);
+
+MODULE_AUTHOR("Mediatek Corporation");
+MODULE_DESCRIPTION("MTK Keypad (KPD) Driver");
+MODULE_LICENSE("GPL");
index 62391d6..c31ab43 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6358/registers.h>
 #include <linux/mfd/mt6397/core.h>
 #include <linux/mfd/mt6397/registers.h>
 #include <linux/module.h>
@@ -74,11 +75,22 @@ static const struct mtk_pmic_regs mt6323_regs = {
        .pmic_rst_reg = MT6323_TOP_RST_MISC,
 };
 
+static const struct mtk_pmic_regs mt6358_regs = {
+       .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+               MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+                                  0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+       .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+               MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+                                  0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+       .pmic_rst_reg = MT6358_TOP_RST_MISC,
+};
+
 struct mtk_pmic_keys_info {
        struct mtk_pmic_keys *keys;
        const struct mtk_pmic_keys_regs *regs;
        unsigned int keycode;
        int irq;
+       int irq_r; /* optional: release irq if different */
        bool wakeup:1;
 };
 
@@ -188,6 +200,18 @@ static int mtk_pmic_key_setup(struct mtk_pmic_keys *keys,
                return ret;
        }
 
+       if (info->irq_r > 0) {
+               ret = devm_request_threaded_irq(keys->dev, info->irq_r, NULL,
+                                               mtk_pmic_keys_irq_handler_thread,
+                                               IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+                                               "mtk-pmic-keys", info);
+               if (ret) {
+                       dev_err(keys->dev, "Failed to request IRQ_r: %d: %d\n",
+                               info->irq, ret);
+                       return ret;
+               }
+       }
+
        input_set_capability(keys->input_dev, EV_KEY, info->keycode);
 
        return 0;
@@ -199,8 +223,11 @@ static int __maybe_unused mtk_pmic_keys_suspend(struct device *dev)
        int index;
 
        for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
-               if (keys->keys[index].wakeup)
+               if (keys->keys[index].wakeup) {
                        enable_irq_wake(keys->keys[index].irq);
+                       if (keys->keys[index].irq_r > 0)
+                               enable_irq_wake(keys->keys[index].irq_r);
+               }
        }
 
        return 0;
@@ -212,8 +239,11 @@ static int __maybe_unused mtk_pmic_keys_resume(struct device *dev)
        int index;
 
        for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
-               if (keys->keys[index].wakeup)
+               if (keys->keys[index].wakeup) {
                        disable_irq_wake(keys->keys[index].irq);
+                       if (keys->keys[index].irq_r > 0)
+                               disable_irq_wake(keys->keys[index].irq_r);
+               }
        }
 
        return 0;
@@ -229,6 +259,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
        }, {
                .compatible = "mediatek,mt6323-keys",
                .data = &mt6323_regs,
+       }, {
+               .compatible = "mediatek,mt6358-keys",
+               .data = &mt6358_regs,
        }, {
                /* sentinel */
        }
@@ -241,6 +274,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        unsigned int keycount;
        struct mt6397_chip *pmic_chip = dev_get_drvdata(pdev->dev.parent);
        struct device_node *node = pdev->dev.of_node, *child;
+       static const char *const irqnames[] = { "powerkey", "homekey" };
+       static const char *const irqnames_r[] = { "powerkey_r", "homekey_r" };
        struct mtk_pmic_keys *keys;
        const struct mtk_pmic_regs *mtk_pmic_regs;
        struct input_dev *input_dev;
@@ -268,7 +303,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        input_dev->id.version = 0x0001;
 
        keycount = of_get_available_child_count(node);
-       if (keycount > MTK_PMIC_MAX_KEY_COUNT) {
+       if (keycount > MTK_PMIC_MAX_KEY_COUNT ||
+           keycount > ARRAY_SIZE(irqnames)) {
                dev_err(keys->dev, "too many keys defined (%d)\n", keycount);
                return -EINVAL;
        }
@@ -276,12 +312,23 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        for_each_child_of_node(node, child) {
                keys->keys[index].regs = &mtk_pmic_regs->keys_regs[index];
 
-               keys->keys[index].irq = platform_get_irq(pdev, index);
+               keys->keys[index].irq =
+                       platform_get_irq_byname(pdev, irqnames[index]);
                if (keys->keys[index].irq < 0) {
                        of_node_put(child);
                        return keys->keys[index].irq;
                }
 
+               if (of_device_is_compatible(node, "mediatek,mt6358-keys")) {
+                       keys->keys[index].irq_r = platform_get_irq_byname(pdev,
+                                                                         irqnames_r[index]);
+
+                       if (keys->keys[index].irq_r < 0) {
+                               of_node_put(child);
+                               return keys->keys[index].irq_r;
+                       }
+               }
+
                error = of_property_read_u32(child,
                        "linux,keycodes", &keys->keys[index].keycode);
                if (error) {
index 7985192..b14a389 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2015  Dialog Semiconductor Ltd.
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/input.h>
@@ -182,13 +183,6 @@ static irqreturn_t da9063_onkey_irq_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void da9063_cancel_poll(void *data)
-{
-       struct da9063_onkey *onkey = data;
-
-       cancel_delayed_work_sync(&onkey->work);
-}
-
 static int da9063_onkey_probe(struct platform_device *pdev)
 {
        struct da9063_onkey *onkey;
@@ -234,9 +228,8 @@ static int da9063_onkey_probe(struct platform_device *pdev)
 
        input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
-       INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
-
-       error = devm_add_action(&pdev->dev, da9063_cancel_poll, onkey);
+       error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
+                                            da9063_poll_on);
        if (error) {
                dev_err(&pdev->dev,
                        "Failed to add cancel poll action: %d\n",
index ffad142..434d48a 100644 (file)
@@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN2044", /* L470  */
        "LEN2054", /* E480 */
        "LEN2055", /* E580 */
+       "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
        "LEN2068", /* T14 Gen 1 */
        "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
index 8970b49..9b02dd5 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
+#include <linux/timekeeping.h>
 
 #define DRIVER_NAME            "ps2-gpio"
 
 #define PS2_DATA_BIT7          8
 #define PS2_PARITY_BIT         9
 #define PS2_STOP_BIT           10
-#define PS2_TX_TIMEOUT         11
-#define PS2_ACK_BIT            12
+#define PS2_ACK_BIT            11
 
 #define PS2_DEV_RET_ACK                0xfa
 #define PS2_DEV_RET_NACK       0xfe
 
 #define PS2_CMD_RESEND         0xfe
 
+/*
+ * The PS2 protocol specifies a clock frequency between 10kHz and 16.7kHz,
+ * therefore the maximal interrupt interval should be 100us and the minimum
+ * interrupt interval should be ~60us. Let's allow +/- 20us for frequency
+ * deviations and interrupt latency.
+ *
+ * The data line must be samples after ~30us to 50us after the falling edge,
+ * since the device updates the data line at the rising edge.
+ *
+ * ___            ______            ______            ______            ___
+ *    \          /      \          /      \          /      \          /
+ *     \        /        \        /        \        /        \        /
+ *      \______/          \______/          \______/          \______/
+ *
+ *     |-----------------|                 |--------|
+ *          60us/100us                      30us/50us
+ */
+#define PS2_CLK_FREQ_MIN_HZ            10000
+#define PS2_CLK_FREQ_MAX_HZ            16700
+#define PS2_CLK_MIN_INTERVAL_US                ((1000 * 1000) / PS2_CLK_FREQ_MAX_HZ)
+#define PS2_CLK_MAX_INTERVAL_US                ((1000 * 1000) / PS2_CLK_FREQ_MIN_HZ)
+#define PS2_IRQ_MIN_INTERVAL_US                (PS2_CLK_MIN_INTERVAL_US - 20)
+#define PS2_IRQ_MAX_INTERVAL_US                (PS2_CLK_MAX_INTERVAL_US + 20)
+
 struct ps2_gpio_data {
        struct device *dev;
        struct serio *serio;
@@ -52,19 +76,30 @@ struct ps2_gpio_data {
        struct gpio_desc *gpio_data;
        bool write_enable;
        int irq;
-       unsigned char rx_cnt;
-       unsigned char rx_byte;
-       unsigned char tx_cnt;
-       unsigned char tx_byte;
-       struct completion tx_done;
-       struct mutex tx_mutex;
-       struct delayed_work tx_work;
+       ktime_t t_irq_now;
+       ktime_t t_irq_last;
+       struct {
+               unsigned char cnt;
+               unsigned char byte;
+       } rx;
+       struct {
+               unsigned char cnt;
+               unsigned char byte;
+               ktime_t t_xfer_start;
+               ktime_t t_xfer_end;
+               struct completion complete;
+               struct mutex mutex;
+               struct delayed_work work;
+       } tx;
 };
 
 static int ps2_gpio_open(struct serio *serio)
 {
        struct ps2_gpio_data *drvdata = serio->port_data;
 
+       drvdata->t_irq_last = 0;
+       drvdata->tx.t_xfer_end = 0;
+
        enable_irq(drvdata->irq);
        return 0;
 }
@@ -73,7 +108,7 @@ static void ps2_gpio_close(struct serio *serio)
 {
        struct ps2_gpio_data *drvdata = serio->port_data;
 
-       flush_delayed_work(&drvdata->tx_work);
+       flush_delayed_work(&drvdata->tx.work);
        disable_irq(drvdata->irq);
 }
 
@@ -85,9 +120,9 @@ static int __ps2_gpio_write(struct serio *serio, unsigned char val)
        gpiod_direction_output(drvdata->gpio_clk, 0);
 
        drvdata->mode = PS2_MODE_TX;
-       drvdata->tx_byte = val;
+       drvdata->tx.byte = val;
 
-       schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200));
+       schedule_delayed_work(&drvdata->tx.work, usecs_to_jiffies(200));
 
        return 0;
 }
@@ -98,12 +133,12 @@ static int ps2_gpio_write(struct serio *serio, unsigned char val)
        int ret = 0;
 
        if (in_task()) {
-               mutex_lock(&drvdata->tx_mutex);
+               mutex_lock(&drvdata->tx.mutex);
                __ps2_gpio_write(serio, val);
-               if (!wait_for_completion_timeout(&drvdata->tx_done,
+               if (!wait_for_completion_timeout(&drvdata->tx.complete,
                                                 msecs_to_jiffies(10000)))
                        ret = SERIO_TIMEOUT;
-               mutex_unlock(&drvdata->tx_mutex);
+               mutex_unlock(&drvdata->tx.mutex);
        } else {
                __ps2_gpio_write(serio, val);
        }
@@ -115,9 +150,10 @@ static void ps2_gpio_tx_work_fn(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct ps2_gpio_data *drvdata = container_of(dwork,
-                                                   struct ps2_gpio_data,
-                                                   tx_work);
+                                                    struct ps2_gpio_data,
+                                                    tx.work);
 
+       drvdata->tx.t_xfer_start = ktime_get();
        enable_irq(drvdata->irq);
        gpiod_direction_output(drvdata->gpio_data, 0);
        gpiod_direction_input(drvdata->gpio_clk);
@@ -128,20 +164,31 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
        unsigned char byte, cnt;
        int data;
        int rxflags = 0;
-       static unsigned long old_jiffies;
+       s64 us_delta;
 
-       byte = drvdata->rx_byte;
-       cnt = drvdata->rx_cnt;
+       byte = drvdata->rx.byte;
+       cnt = drvdata->rx.cnt;
 
-       if (old_jiffies == 0)
-               old_jiffies = jiffies;
+       drvdata->t_irq_now = ktime_get();
+
+       /*
+        * We need to consider spurious interrupts happening right after
+        * a TX xfer finished.
+        */
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->tx.t_xfer_end);
+       if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US))
+               goto end;
 
-       if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+       if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt) {
                dev_err(drvdata->dev,
                        "RX: timeout, probably we missed an interrupt\n");
                goto err;
+       } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+               /* Ignore spurious IRQs. */
+               goto end;
        }
-       old_jiffies = jiffies;
+       drvdata->t_irq_last = drvdata->t_irq_now;
 
        data = gpiod_get_value(drvdata->gpio_data);
        if (unlikely(data < 0)) {
@@ -178,8 +225,16 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
                        if (!drvdata->write_enable)
                                goto err;
                }
+               break;
+       case PS2_STOP_BIT:
+               /* stop bit should be high */
+               if (unlikely(!data)) {
+                       dev_err(drvdata->dev, "RX: stop bit should be high\n");
+                       goto err;
+               }
 
-               /* Do not send spurious ACK's and NACK's when write fn is
+               /*
+                * Do not send spurious ACK's and NACK's when write fn is
                 * not provided.
                 */
                if (!drvdata->write_enable) {
@@ -189,23 +244,11 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
                                break;
                }
 
-               /* Let's send the data without waiting for the stop bit to be
-                * sent. It may happen that we miss the stop bit. When this
-                * happens we have no way to recover from this, certainly
-                * missing the parity bit would be recognized when processing
-                * the stop bit. When missing both, data is lost.
-                */
                serio_interrupt(drvdata->serio, byte, rxflags);
                dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte);
-               break;
-       case PS2_STOP_BIT:
-               /* stop bit should be high */
-               if (unlikely(!data)) {
-                       dev_err(drvdata->dev, "RX: stop bit should be high\n");
-                       goto err;
-               }
+
                cnt = byte = 0;
-               old_jiffies = 0;
+
                goto end; /* success */
        default:
                dev_err(drvdata->dev, "RX: got out of sync with the device\n");
@@ -217,11 +260,10 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
 
 err:
        cnt = byte = 0;
-       old_jiffies = 0;
        __ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND);
 end:
-       drvdata->rx_cnt = cnt;
-       drvdata->rx_byte = byte;
+       drvdata->rx.cnt = cnt;
+       drvdata->rx.byte = byte;
        return IRQ_HANDLED;
 }
 
@@ -229,20 +271,34 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
 {
        unsigned char byte, cnt;
        int data;
-       static unsigned long old_jiffies;
+       s64 us_delta;
+
+       cnt = drvdata->tx.cnt;
+       byte = drvdata->tx.byte;
 
-       cnt = drvdata->tx_cnt;
-       byte = drvdata->tx_byte;
+       drvdata->t_irq_now = ktime_get();
 
-       if (old_jiffies == 0)
-               old_jiffies = jiffies;
+       /*
+        * There might be pending IRQs since we disabled IRQs in
+        * __ps2_gpio_write().  We can expect at least one clock period until
+        * the device generates the first falling edge after releasing the
+        * clock line.
+        */
+       us_delta = ktime_us_delta(drvdata->t_irq_now,
+                                 drvdata->tx.t_xfer_start);
+       if (unlikely(us_delta < PS2_CLK_MIN_INTERVAL_US))
+               goto end;
 
-       if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+       if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt > 1) {
                dev_err(drvdata->dev,
                        "TX: timeout, probably we missed an interrupt\n");
                goto err;
+       } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+               /* Ignore spurious IRQs. */
+               goto end;
        }
-       old_jiffies = jiffies;
+       drvdata->t_irq_last = drvdata->t_irq_now;
 
        switch (cnt) {
        case PS2_START_BIT:
@@ -270,27 +326,22 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
                /* release data line to generate stop bit */
                gpiod_direction_input(drvdata->gpio_data);
                break;
-       case PS2_TX_TIMEOUT:
-               /* Devices generate one extra clock pulse before sending the
-                * acknowledgment.
-                */
-               break;
        case PS2_ACK_BIT:
-               gpiod_direction_input(drvdata->gpio_data);
                data = gpiod_get_value(drvdata->gpio_data);
                if (data) {
                        dev_warn(drvdata->dev, "TX: received NACK, retry\n");
                        goto err;
                }
 
+               drvdata->tx.t_xfer_end = ktime_get();
                drvdata->mode = PS2_MODE_RX;
-               complete(&drvdata->tx_done);
+               complete(&drvdata->tx.complete);
 
                cnt = 1;
-               old_jiffies = 0;
                goto end; /* success */
        default:
-               /* Probably we missed the stop bit. Therefore we release data
+               /*
+                * Probably we missed the stop bit. Therefore we release data
                 * line and try again.
                 */
                gpiod_direction_input(drvdata->gpio_data);
@@ -303,11 +354,10 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
 
 err:
        cnt = 1;
-       old_jiffies = 0;
        gpiod_direction_input(drvdata->gpio_data);
-       __ps2_gpio_write(drvdata->serio, drvdata->tx_byte);
+       __ps2_gpio_write(drvdata->serio, drvdata->tx.byte);
 end:
-       drvdata->tx_cnt = cnt;
+       drvdata->tx.cnt = cnt;
        return IRQ_HANDLED;
 }
 
@@ -322,14 +372,19 @@ static irqreturn_t ps2_gpio_irq(int irq, void *dev_id)
 static int ps2_gpio_get_props(struct device *dev,
                                 struct ps2_gpio_data *drvdata)
 {
-       drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN);
+       enum gpiod_flags gflags;
+
+       /* Enforce open drain, since this is required by the PS/2 bus. */
+       gflags = GPIOD_IN | GPIOD_FLAGS_BIT_OPEN_DRAIN;
+
+       drvdata->gpio_data = devm_gpiod_get(dev, "data", gflags);
        if (IS_ERR(drvdata->gpio_data)) {
                dev_err(dev, "failed to request data gpio: %ld",
                        PTR_ERR(drvdata->gpio_data));
                return PTR_ERR(drvdata->gpio_data);
        }
 
-       drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN);
+       drvdata->gpio_clk = devm_gpiod_get(dev, "clk", gflags);
        if (IS_ERR(drvdata->gpio_clk)) {
                dev_err(dev, "failed to request clock gpio: %ld",
                        PTR_ERR(drvdata->gpio_clk));
@@ -387,7 +442,8 @@ static int ps2_gpio_probe(struct platform_device *pdev)
        serio->id.type = SERIO_8042;
        serio->open = ps2_gpio_open;
        serio->close = ps2_gpio_close;
-       /* Write can be enabled in platform/dt data, but possibly it will not
+       /*
+        * Write can be enabled in platform/dt data, but possibly it will not
         * work because of the tough timings.
         */
        serio->write = drvdata->write_enable ? ps2_gpio_write : NULL;
@@ -400,14 +456,15 @@ static int ps2_gpio_probe(struct platform_device *pdev)
        drvdata->dev = dev;
        drvdata->mode = PS2_MODE_RX;
 
-       /* Tx count always starts at 1, as the start bit is sent implicitly by
+       /*
+        * Tx count always starts at 1, as the start bit is sent implicitly by
         * host-to-device communication initialization.
         */
-       drvdata->tx_cnt = 1;
+       drvdata->tx.cnt = 1;
 
-       INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn);
-       init_completion(&drvdata->tx_done);
-       mutex_init(&drvdata->tx_mutex);
+       INIT_DELAYED_WORK(&drvdata->tx.work, ps2_gpio_tx_work_fn);
+       init_completion(&drvdata->tx.complete);
+       mutex_init(&drvdata->tx.mutex);
 
        serio_register_port(serio);
        platform_set_drvdata(pdev, drvdata);
index ff7794c..43c7d6e 100644 (file)
@@ -638,6 +638,16 @@ config TOUCHSCREEN_MTOUCH
          To compile this driver as a module, choose M here: the
          module will be called mtouch.
 
+config TOUCHSCREEN_IMAGIS
+       tristate "Imagis touchscreen support"
+       depends on I2C
+       help
+         Say Y here if you have an Imagis IST30xxC touchscreen.
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called imagis.
+
 config TOUCHSCREEN_IMX6UL_TSC
        tristate "Freescale i.MX6UL touchscreen controller"
        depends on ((OF && GPIOLIB) || COMPILE_TEST) && HAS_IOMEM
index 39a8127..557f84f 100644 (file)
@@ -49,6 +49,7 @@ obj-$(CONFIG_TOUCHSCREEN_GOODIX)      += goodix_ts.o
 obj-$(CONFIG_TOUCHSCREEN_HIDEEP)       += hideep.o
 obj-$(CONFIG_TOUCHSCREEN_ILI210X)      += ili210x.o
 obj-$(CONFIG_TOUCHSCREEN_ILITEK)       += ilitek_ts_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_IMAGIS)       += imagis.o
 obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC)   += imx6ul_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)       += inexio.o
 obj-$(CONFIG_TOUCHSCREEN_IPROC)                += bcm_iproc_tsc.o
index 752e8ba..3ad9870 100644 (file)
@@ -298,32 +298,17 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
        return -ENOMSG;
 }
 
-static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
+static int goodix_create_pen_input(struct goodix_ts_data *ts)
 {
        struct device *dev = &ts->client->dev;
        struct input_dev *input;
 
        input = devm_input_allocate_device(dev);
        if (!input)
-               return NULL;
-
-       input_alloc_absinfo(input);
-       if (!input->absinfo) {
-               input_free_device(input);
-               return NULL;
-       }
-
-       input->absinfo[ABS_X] = ts->input_dev->absinfo[ABS_MT_POSITION_X];
-       input->absinfo[ABS_Y] = ts->input_dev->absinfo[ABS_MT_POSITION_Y];
-       __set_bit(ABS_X, input->absbit);
-       __set_bit(ABS_Y, input->absbit);
-       input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+               return -ENOMEM;
 
-       input_set_capability(input, EV_KEY, BTN_TOUCH);
-       input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
-       input_set_capability(input, EV_KEY, BTN_STYLUS);
-       input_set_capability(input, EV_KEY, BTN_STYLUS2);
-       __set_bit(INPUT_PROP_DIRECT, input->propbit);
+       input_copy_abs(input, ABS_X, ts->input_dev, ABS_MT_POSITION_X);
+       input_copy_abs(input, ABS_Y, ts->input_dev, ABS_MT_POSITION_Y);
        /*
         * The resolution of these touchscreens is about 10 units/mm, the actual
         * resolution does not matter much since we set INPUT_PROP_DIRECT.
@@ -331,6 +316,13 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
         */
        input_abs_set_res(input, ABS_X, 10);
        input_abs_set_res(input, ABS_Y, 10);
+       input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+
+       input_set_capability(input, EV_KEY, BTN_TOUCH);
+       input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+       input_set_capability(input, EV_KEY, BTN_STYLUS);
+       input_set_capability(input, EV_KEY, BTN_STYLUS2);
+       __set_bit(INPUT_PROP_DIRECT, input->propbit);
 
        input->name = "Goodix Active Pen";
        input->phys = "input/pen";
@@ -340,25 +332,23 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
                input->id.product = 0x1001;
        input->id.version = ts->version;
 
-       if (input_register_device(input) != 0) {
-               input_free_device(input);
-               return NULL;
-       }
-
-       return input;
+       ts->input_pen = input;
+       return 0;
 }
 
 static void goodix_ts_report_pen_down(struct goodix_ts_data *ts, u8 *data)
 {
-       int input_x, input_y, input_w;
+       int input_x, input_y, input_w, error;
        u8 key_value;
 
-       if (!ts->input_pen) {
-               ts->input_pen = goodix_create_pen_input(ts);
-               if (!ts->input_pen)
-                       return;
+       if (!ts->pen_input_registered) {
+               error = input_register_device(ts->input_pen);
+               ts->pen_input_registered = (error == 0) ? 1 : error;
        }
 
+       if (ts->pen_input_registered < 0)
+               return;
+
        if (ts->contact_size == 9) {
                input_x = get_unaligned_le16(&data[4]);
                input_y = get_unaligned_le16(&data[6]);
@@ -1215,6 +1205,17 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
                return error;
        }
 
+       /*
+        * Create the input_pen device before goodix_request_irq() calls
+        * devm_request_threaded_irq() so that the devm framework frees
+        * it after disabling the irq.
+        * Unfortunately there is no way to detect if the touchscreen has pen
+        * support, so registering the dev is delayed till the first pen event.
+        */
+       error = goodix_create_pen_input(ts);
+       if (error)
+               return error;
+
        ts->irq_flags = goodix_irq_flags[ts->int_trigger_type] | IRQF_ONESHOT;
        error = goodix_request_irq(ts);
        if (error) {
index fa8602e..87797cc 100644 (file)
@@ -94,6 +94,7 @@ struct goodix_ts_data {
        u16 version;
        bool reset_controller_at_probe;
        bool load_cfg_from_disk;
+       int pen_input_registered;
        struct completion firmware_loading_complete;
        unsigned long irq_flags;
        enum goodix_irq_pin_access_method irq_pin_access_method;
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
new file mode 100644 (file)
index 0000000..e2697e6
--- /dev/null
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#define IST3038C_HIB_ACCESS            (0x800B << 16)
+#define IST3038C_DIRECT_ACCESS         BIT(31)
+#define IST3038C_REG_CHIPID            0x40001000
+#define IST3038C_REG_HIB_BASE          0x30000100
+#define IST3038C_REG_TOUCH_STATUS      (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS)
+#define IST3038C_REG_TOUCH_COORD       (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x8)
+#define IST3038C_REG_INTR_MESSAGE      (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x4)
+#define IST3038C_WHOAMI                        0x38c
+#define IST3038C_CHIP_ON_DELAY_MS      60
+#define IST3038C_I2C_RETRY_COUNT       3
+#define IST3038C_MAX_FINGER_NUM                10
+#define IST3038C_X_MASK                        GENMASK(23, 12)
+#define IST3038C_X_SHIFT               12
+#define IST3038C_Y_MASK                        GENMASK(11, 0)
+#define IST3038C_AREA_MASK             GENMASK(27, 24)
+#define IST3038C_AREA_SHIFT            24
+#define IST3038C_FINGER_COUNT_MASK     GENMASK(15, 12)
+#define IST3038C_FINGER_COUNT_SHIFT    12
+#define IST3038C_FINGER_STATUS_MASK    GENMASK(9, 0)
+
+struct imagis_ts {
+       struct i2c_client *client;
+       struct input_dev *input_dev;
+       struct touchscreen_properties prop;
+       struct regulator_bulk_data supplies[2];
+};
+
+static int imagis_i2c_read_reg(struct imagis_ts *ts,
+                              unsigned int reg, u32 *data)
+{
+       __be32 ret_be;
+       __be32 reg_be = cpu_to_be32(reg);
+       struct i2c_msg msg[] = {
+               {
+                       .addr = ts->client->addr,
+                       .flags = 0,
+                       .buf = (unsigned char *)&reg_be,
+                       .len = sizeof(reg_be),
+               }, {
+                       .addr = ts->client->addr,
+                       .flags = I2C_M_RD,
+                       .buf = (unsigned char *)&ret_be,
+                       .len = sizeof(ret_be),
+               },
+       };
+       int ret, error;
+       int retry = IST3038C_I2C_RETRY_COUNT;
+
+       /* Retry in case the controller fails to respond */
+       do {
+               ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+               if (ret == ARRAY_SIZE(msg)) {
+                       *data = be32_to_cpu(ret_be);
+                       return 0;
+               }
+
+               error = ret < 0 ? ret : -EIO;
+               dev_err(&ts->client->dev,
+                       "%s - i2c_transfer failed: %d (%d)\n",
+                       __func__, error, ret);
+       } while (--retry);
+
+       return error;
+}
+
+static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+{
+       struct imagis_ts *ts = dev_id;
+       u32 intr_message, finger_status;
+       unsigned int finger_count, finger_pressed;
+       int i;
+       int error;
+
+       error = imagis_i2c_read_reg(ts, IST3038C_REG_INTR_MESSAGE,
+                                   &intr_message);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "failed to read the interrupt message: %d\n", error);
+               goto out;
+       }
+
+       finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
+                               IST3038C_FINGER_COUNT_SHIFT;
+       if (finger_count > IST3038C_MAX_FINGER_NUM) {
+               dev_err(&ts->client->dev,
+                       "finger count %d is more than maximum supported\n",
+                       finger_count);
+               goto out;
+       }
+
+       finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
+
+       for (i = 0; i < finger_count; i++) {
+               error = imagis_i2c_read_reg(ts,
+                                           IST3038C_REG_TOUCH_COORD + (i * 4),
+                                           &finger_status);
+               if (error) {
+                       dev_err(&ts->client->dev,
+                               "failed to read coordinates for finger %d: %d\n",
+                               i, error);
+                       goto out;
+               }
+
+               input_mt_slot(ts->input_dev, i);
+               input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+                                          finger_pressed & BIT(i));
+               touchscreen_report_pos(ts->input_dev, &ts->prop,
+                                      (finger_status & IST3038C_X_MASK) >>
+                                               IST3038C_X_SHIFT,
+                                      finger_status & IST3038C_Y_MASK, 1);
+               input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+                                (finger_status & IST3038C_AREA_MASK) >>
+                                       IST3038C_AREA_SHIFT);
+       }
+
+       input_mt_sync_frame(ts->input_dev);
+       input_sync(ts->input_dev);
+
+out:
+       return IRQ_HANDLED;
+}
+
+static void imagis_power_off(void *_ts)
+{
+       struct imagis_ts *ts = _ts;
+
+       regulator_bulk_disable(ARRAY_SIZE(ts->supplies), ts->supplies);
+}
+
+static int imagis_power_on(struct imagis_ts *ts)
+{
+       int error;
+
+       error = regulator_bulk_enable(ARRAY_SIZE(ts->supplies), ts->supplies);
+       if (error)
+               return error;
+
+       msleep(IST3038C_CHIP_ON_DELAY_MS);
+
+       return 0;
+}
+
+static int imagis_start(struct imagis_ts *ts)
+{
+       int error;
+
+       error = imagis_power_on(ts);
+       if (error)
+               return error;
+
+       enable_irq(ts->client->irq);
+
+       return 0;
+}
+
+static int imagis_stop(struct imagis_ts *ts)
+{
+       disable_irq(ts->client->irq);
+
+       imagis_power_off(ts);
+
+       return 0;
+}
+
+static int imagis_input_open(struct input_dev *dev)
+{
+       struct imagis_ts *ts = input_get_drvdata(dev);
+
+       return imagis_start(ts);
+}
+
+static void imagis_input_close(struct input_dev *dev)
+{
+       struct imagis_ts *ts = input_get_drvdata(dev);
+
+       imagis_stop(ts);
+}
+
+static int imagis_init_input_dev(struct imagis_ts *ts)
+{
+       struct input_dev *input_dev;
+       int error;
+
+       input_dev = devm_input_allocate_device(&ts->client->dev);
+       if (!input_dev)
+               return -ENOMEM;
+
+       ts->input_dev = input_dev;
+
+       input_dev->name = "Imagis capacitive touchscreen";
+       input_dev->phys = "input/ts";
+       input_dev->id.bustype = BUS_I2C;
+       input_dev->open = imagis_input_open;
+       input_dev->close = imagis_input_close;
+
+       input_set_drvdata(input_dev, ts);
+
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+       touchscreen_parse_properties(input_dev, true, &ts->prop);
+       if (!ts->prop.max_x || !ts->prop.max_y) {
+               dev_err(&ts->client->dev,
+                       "Touchscreen-size-x and/or touchscreen-size-y not set in dts\n");
+               return -EINVAL;
+       }
+
+       error = input_mt_init_slots(input_dev,
+                                   IST3038C_MAX_FINGER_NUM,
+                                   INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "Failed to initialize MT slots: %d", error);
+               return error;
+       }
+
+       error = input_register_device(input_dev);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "Failed to register input device: %d", error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int imagis_init_regulators(struct imagis_ts *ts)
+{
+       struct i2c_client *client = ts->client;
+
+       ts->supplies[0].supply = "vdd";
+       ts->supplies[1].supply = "vddio";
+       return devm_regulator_bulk_get(&client->dev,
+                                      ARRAY_SIZE(ts->supplies),
+                                      ts->supplies);
+}
+
+static int imagis_probe(struct i2c_client *i2c)
+{
+       struct device *dev = &i2c->dev;
+       struct imagis_ts *ts;
+       int chip_id, error;
+
+       ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+       if (!ts)
+               return -ENOMEM;
+
+       ts->client = i2c;
+
+       error = imagis_init_regulators(ts);
+       if (error) {
+               dev_err(dev, "regulator init error: %d\n", error);
+               return error;
+       }
+
+       error = imagis_power_on(ts);
+       if (error) {
+               dev_err(dev, "failed to enable regulators: %d\n", error);
+               return error;
+       }
+
+       error = devm_add_action_or_reset(dev, imagis_power_off, ts);
+       if (error) {
+               dev_err(dev, "failed to install poweroff action: %d\n", error);
+               return error;
+       }
+
+       error = imagis_i2c_read_reg(ts,
+                       IST3038C_REG_CHIPID | IST3038C_DIRECT_ACCESS,
+                       &chip_id);
+       if (error) {
+               dev_err(dev, "chip ID read failure: %d\n", error);
+               return error;
+       }
+
+       if (chip_id != IST3038C_WHOAMI) {
+               dev_err(dev, "unknown chip ID: 0x%x\n", chip_id);
+               return -EINVAL;
+       }
+
+       error = devm_request_threaded_irq(dev, i2c->irq,
+                                         NULL, imagis_interrupt,
+                                         IRQF_ONESHOT | IRQF_NO_AUTOEN,
+                                         "imagis-touchscreen", ts);
+       if (error) {
+               dev_err(dev, "IRQ %d allocation failure: %d\n",
+                       i2c->irq, error);
+               return error;
+       }
+
+       error = imagis_init_input_dev(ts);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+static int __maybe_unused imagis_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct imagis_ts *ts = i2c_get_clientdata(client);
+       int retval = 0;
+
+       mutex_lock(&ts->input_dev->mutex);
+
+       if (input_device_enabled(ts->input_dev))
+               retval = imagis_stop(ts);
+
+       mutex_unlock(&ts->input_dev->mutex);
+
+       return retval;
+}
+
+static int __maybe_unused imagis_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct imagis_ts *ts = i2c_get_clientdata(client);
+       int retval = 0;
+
+       mutex_lock(&ts->input_dev->mutex);
+
+       if (input_device_enabled(ts->input_dev))
+               retval = imagis_start(ts);
+
+       mutex_unlock(&ts->input_dev->mutex);
+
+       return retval;
+}
+
+static SIMPLE_DEV_PM_OPS(imagis_pm_ops, imagis_suspend, imagis_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id imagis_of_match[] = {
+       { .compatible = "imagis,ist3038c", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, imagis_of_match);
+#endif
+
+static struct i2c_driver imagis_ts_driver = {
+       .driver = {
+               .name = "imagis-touchscreen",
+               .pm = &imagis_pm_ops,
+               .of_match_table = of_match_ptr(imagis_of_match),
+       },
+       .probe_new = imagis_probe,
+};
+
+module_i2c_driver(imagis_ts_driver);
+
+MODULE_DESCRIPTION("Imagis IST3038C Touchscreen Driver");
+MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
+MODULE_LICENSE("GPL");
index b3fa712..34c4cca 100644 (file)
@@ -486,11 +486,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
 {
        struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
        struct touchscreen_properties *prop = &iqs5xx->prop;
-       struct input_dev *input;
+       struct input_dev *input = iqs5xx->input;
        u16 max_x, max_y;
        int error;
 
-       if (!iqs5xx->input) {
+       if (!input) {
                input = devm_input_allocate_device(&client->dev);
                if (!input)
                        return -ENOMEM;
@@ -512,11 +512,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
        if (error)
                return error;
 
-       input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
-       input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
-       input_set_abs_params(iqs5xx->input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+       input_set_abs_params(input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
 
-       touchscreen_parse_properties(iqs5xx->input, true, prop);
+       touchscreen_parse_properties(input, true, prop);
 
        /*
         * The device reserves 0xFFFF for coordinates that correspond to slots
@@ -540,7 +540,7 @@ static int iqs5xx_axis_init(struct i2c_client *client)
                        return error;
        }
 
-       error = input_mt_init_slots(iqs5xx->input, IQS5XX_NUM_CONTACTS,
+       error = input_mt_init_slots(input, IQS5XX_NUM_CONTACTS,
                                    INPUT_MT_DIRECT);
        if (error)
                dev_err(&client->dev, "Failed to initialize slots: %d\n",
@@ -674,7 +674,7 @@ static irqreturn_t iqs5xx_irq(int irq, void *data)
                input_mt_slot(input, i);
                if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
                                               pressure != 0)) {
-                       touchscreen_report_pos(iqs5xx->input, &iqs5xx->prop,
+                       touchscreen_report_pos(input, &iqs5xx->prop,
                                               be16_to_cpu(touch_data->abs_x),
                                               be16_to_cpu(touch_data->abs_y),
                                               true);
index bc11203..72e0b76 100644 (file)
@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
 
        err = pm_runtime_get_sync(&sdata->client->dev);
        if (err < 0)
-               return err;
+               goto out;
 
        err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
        if (err)
-               return err;
+               goto out;
 
        mutex_lock(&sdata->mutex);
        sdata->running = true;
@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
                                 "failed to enable touchkey\n");
        }
 
-       return 0;
+out:
+       pm_runtime_put_noidle(&sdata->client->dev);
+       return err;
 }
 
 static void stmfts_input_close(struct input_dev *dev)
index 27810f6..72c7258 100644 (file)
@@ -88,6 +88,8 @@ struct tsc200x {
        int                     in_z1;
        int                     in_z2;
 
+       struct touchscreen_properties prop;
+
        spinlock_t              lock;
        struct timer_list       penup_timer;
 
@@ -113,8 +115,7 @@ static void tsc200x_update_pen_state(struct tsc200x *ts,
                                     int x, int y, int pressure)
 {
        if (pressure) {
-               input_report_abs(ts->idev, ABS_X, x);
-               input_report_abs(ts->idev, ABS_Y, y);
+               touchscreen_report_pos(ts->idev, &ts->prop, x, y, false);
                input_report_abs(ts->idev, ABS_PRESSURE, pressure);
                if (!ts->pen_down) {
                        input_report_key(ts->idev, BTN_TOUCH, !!pressure);
@@ -533,7 +534,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
        input_set_abs_params(input_dev, ABS_PRESSURE,
                             0, MAX_12BIT, TSC200X_DEF_P_FUZZ, 0);
 
-       touchscreen_parse_properties(input_dev, false, NULL);
+       touchscreen_parse_properties(input_dev, false, &ts->prop);
 
        /* Ensure the touchscreen is off */
        tsc200x_stop_scan(ts);
diff --git a/drivers/input/vivaldi-fmap.c b/drivers/input/vivaldi-fmap.c
new file mode 100644 (file)
index 0000000..6dae83d
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS Vivaldi keyboard function row mapping
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+/**
+ * vivaldi_function_row_physmap_show - Print vivaldi function row physmap attribute
+ * @data: The vivaldi function row map
+ * @buf: Buffer to print the function row phsymap to
+ */
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+                                         char *buf)
+{
+       ssize_t size = 0;
+       int i;
+       const u32 *physmap = data->function_row_physmap;
+
+       if (!data->num_function_row_keys)
+               return 0;
+
+       for (i = 0; i < data->num_function_row_keys; i++)
+               size += scnprintf(buf + size, PAGE_SIZE - size,
+                                 "%s%02X", size ? " " : "", physmap[i]);
+       if (size)
+               size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+       return size;
+}
+EXPORT_SYMBOL_GPL(vivaldi_function_row_physmap_show);
+
+MODULE_LICENSE("GPL");
index a7e3eb9..a32050f 100644 (file)
@@ -351,9 +351,6 @@ static ssize_t dev_attribute_show(struct device *dev,
         * we still can use 'ubi->ubi_num'.
         */
        ubi = container_of(dev, struct ubi_device, dev);
-       ubi = ubi_get_device(ubi->ubi_num);
-       if (!ubi)
-               return -ENODEV;
 
        if (attr == &dev_eraseblock_size)
                ret = sprintf(buf, "%d\n", ubi->leb_size);
@@ -382,7 +379,6 @@ static ssize_t dev_attribute_show(struct device *dev,
        else
                ret = -EINVAL;
 
-       ubi_put_device(ubi);
        return ret;
 }
 
@@ -979,9 +975,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
                        goto out_detach;
        }
 
-       /* Make device "available" before it becomes accessible via sysfs */
-       ubi_devices[ubi_num] = ubi;
-
        err = uif_init(ubi);
        if (err)
                goto out_detach;
@@ -1026,6 +1019,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
        wake_up_process(ubi->bgt_thread);
        spin_unlock(&ubi->wl_lock);
 
+       ubi_devices[ubi_num] = ubi;
        ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
        return ubi_num;
 
@@ -1034,7 +1028,6 @@ out_debugfs:
 out_uif:
        uif_close(ubi);
 out_detach:
-       ubi_devices[ubi_num] = NULL;
        ubi_wl_close(ubi);
        ubi_free_all_volumes(ubi);
        vfree(ubi->vtbl);
index 022af59..6b5f1ff 100644 (file)
@@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                        if (err == UBI_IO_FF_BITFLIPS)
                                scrub = 1;
 
-                       add_aeb(ai, free, pnum, ec, scrub);
+                       ret = add_aeb(ai, free, pnum, ec, scrub);
+                       if (ret)
+                               goto out;
                        continue;
                } else if (err == 0 || err == UBI_IO_BITFLIPS) {
                        dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 0);
+               ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 0);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from used list */
@@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 0);
+               ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 0);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from scrub list */
@@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 1);
+               ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 1);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from erase list */
@@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 1);
+               ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 1);
+               if (ret)
+                       goto fail;
        }
 
        ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
index 139ee13..1bc7b3a 100644 (file)
@@ -56,16 +56,11 @@ static ssize_t vol_attribute_show(struct device *dev,
 {
        int ret;
        struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
-       struct ubi_device *ubi;
-
-       ubi = ubi_get_device(vol->ubi->ubi_num);
-       if (!ubi)
-               return -ENODEV;
+       struct ubi_device *ubi = vol->ubi;
 
        spin_lock(&ubi->volumes_lock);
        if (!ubi->volumes[vol->vol_id]) {
                spin_unlock(&ubi->volumes_lock);
-               ubi_put_device(ubi);
                return -ENODEV;
        }
        /* Take a reference to prevent volume removal */
@@ -103,7 +98,6 @@ static ssize_t vol_attribute_show(struct device *dev,
        vol->ref_count -= 1;
        ubi_assert(vol->ref_count >= 0);
        spin_unlock(&ubi->volumes_lock);
-       ubi_put_device(ubi);
        return ret;
 }
 
index 1a4b56f..b3b5bc1 100644 (file)
@@ -1637,8 +1637,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
                if (err)
                        goto out_fail;
 
-               can_put_echo_skb(skb, dev, 0, 0);
-
                if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
                        cccr = m_can_read(cdev, M_CAN_CCCR);
                        cccr &= ~CCCR_CMR_MASK;
@@ -1655,6 +1653,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
                        m_can_write(cdev, M_CAN_CCCR, cccr);
                }
                m_can_write(cdev, M_CAN_TXBTIE, 0x1);
+
+               can_put_echo_skb(skb, dev, 0, 0);
+
                m_can_write(cdev, M_CAN_TXBAR, 0x1);
                /* End of xmit function for version 3.0.x */
        } else {
index 325024b..f9dd8fd 100644 (file)
@@ -1786,7 +1786,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
  out_kfree_buf_rx:
        kfree(buf_rx);
 
-       return 0;
+       return err;
 }
 
 #define MCP251XFD_QUIRK_ACTIVE(quirk) \
index 7bedcef..bbec331 100644 (file)
@@ -819,7 +819,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 
                usb_unanchor_urb(urb);
                usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
-               dev_kfree_skb(skb);
 
                atomic_dec(&dev->active_tx_urbs);
 
index 67408e3..b29ba91 100644 (file)
@@ -1092,6 +1092,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
                dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc);
 
                dev->can.data_bittiming_const = &dev->data_bt_const;
+
+               kfree(bt_const_extended);
        }
 
        SET_NETDEV_DEV(netdev, &intf->dev);
index 77bddff..c45a814 100644 (file)
 #define MCBA_USB_RX_BUFF_SIZE 64
 #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
 
-/* MCBA endpoint numbers */
-#define MCBA_USB_EP_IN 1
-#define MCBA_USB_EP_OUT 1
-
 /* Microchip command id */
 #define MBCA_CMD_RECEIVE_MESSAGE 0xE3
 #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
@@ -83,6 +79,8 @@ struct mcba_priv {
        atomic_t free_ctx_cnt;
        void *rxbuf[MCBA_MAX_RX_URBS];
        dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+       int rx_pipe;
+       int tx_pipe;
 };
 
 /* CAN frame */
@@ -268,10 +266,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv,
 
        memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);
 
-       usb_fill_bulk_urb(urb, priv->udev,
-                         usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf,
-                         MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
-                         ctx);
+       usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE,
+                         mcba_usb_write_bulk_callback, ctx);
 
        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        usb_anchor_urb(urb, &priv->tx_submitted);
@@ -364,7 +360,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
 xmit_failed:
        can_free_echo_skb(priv->netdev, ctx->ndx, NULL);
        mcba_usb_free_ctx(ctx);
-       dev_kfree_skb(skb);
        stats->tx_dropped++;
 
        return NETDEV_TX_OK;
@@ -608,7 +603,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
 resubmit_urb:
 
        usb_fill_bulk_urb(urb, priv->udev,
-                         usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT),
+                         priv->rx_pipe,
                          urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
                          mcba_usb_read_bulk_callback, priv);
 
@@ -653,7 +648,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
                urb->transfer_dma = buf_dma;
 
                usb_fill_bulk_urb(urb, priv->udev,
-                                 usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+                                 priv->rx_pipe,
                                  buf, MCBA_USB_RX_BUFF_SIZE,
                                  mcba_usb_read_bulk_callback, priv);
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -807,6 +802,13 @@ static int mcba_usb_probe(struct usb_interface *intf,
        struct mcba_priv *priv;
        int err;
        struct usb_device *usbdev = interface_to_usbdev(intf);
+       struct usb_endpoint_descriptor *in, *out;
+
+       err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL);
+       if (err) {
+               dev_err(&intf->dev, "Can't find endpoints\n");
+               return err;
+       }
 
        netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
        if (!netdev) {
@@ -852,6 +854,9 @@ static int mcba_usb_probe(struct usb_interface *intf,
                goto cleanup_free_candev;
        }
 
+       priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
+       priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);
+
        devm_can_led_init(netdev);
 
        /* Start USB dev only if we have successfully registered CAN device */
index 431af1e..b638604 100644 (file)
@@ -663,9 +663,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
        atomic_inc(&priv->active_tx_urbs);
 
        err = usb_submit_urb(urb, GFP_ATOMIC);
-       if (unlikely(err))
-               goto failed;
-       else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+       if (unlikely(err)) {
+               can_free_echo_skb(netdev, context->echo_index, NULL);
+
+               usb_unanchor_urb(urb);
+               usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+               atomic_dec(&priv->active_tx_urbs);
+
+               if (err == -ENODEV)
+                       netif_device_detach(netdev);
+               else
+                       netdev_warn(netdev, "failed tx_urb %d\n", err);
+               stats->tx_dropped++;
+       } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
                /* Slow down tx path */
                netif_stop_queue(netdev);
 
@@ -684,19 +695,6 @@ nofreecontext:
 
        return NETDEV_TX_BUSY;
 
-failed:
-       can_free_echo_skb(netdev, context->echo_index, NULL);
-
-       usb_unanchor_urb(urb);
-       usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
-
-       atomic_dec(&priv->active_tx_urbs);
-
-       if (err == -ENODEV)
-               netif_device_detach(netdev);
-       else
-               netdev_warn(netdev, "failed tx_urb %d\n", err);
-
 nomembuf:
        usb_free_urb(urb);
 
index 62d52e0..8d382b2 100644 (file)
@@ -1928,6 +1928,10 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
                case FLOW_ACTION_GATE:
                        size = struct_size(sgi, entries, a->gate.num_entries);
                        sgi = kzalloc(size, GFP_KERNEL);
+                       if (!sgi) {
+                               ret = -ENOMEM;
+                               goto err;
+                       }
                        vsc9959_psfp_parse_gate(a, sgi);
                        ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
                        if (ret) {
index d44dd70..79c64f4 100644 (file)
@@ -845,6 +845,7 @@ struct hnae3_handle {
        struct dentry *hnae3_dbgfs;
        /* protects concurrent contention between debugfs commands */
        struct mutex dbgfs_lock;
+       char **dbgfs_buf;
 
        /* Network interface message level enabled bits */
        u32 msg_enable;
index f726a5b..44d9b56 100644 (file)
@@ -1227,7 +1227,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
                return ret;
 
        mutex_lock(&handle->dbgfs_lock);
-       save_buf = &hns3_dbg_cmd[index].buf;
+       save_buf = &handle->dbgfs_buf[index];
 
        if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
            test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
@@ -1332,6 +1332,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
        int ret;
        u32 i;
 
+       handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
+                                        ARRAY_SIZE(hns3_dbg_cmd),
+                                        sizeof(*handle->dbgfs_buf),
+                                        GFP_KERNEL);
+       if (!handle->dbgfs_buf)
+               return -ENOMEM;
+
        hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
                                debugfs_create_dir(name, hns3_dbgfs_root);
        handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1380,9 +1387,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
        u32 i;
 
        for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
-               if (hns3_dbg_cmd[i].buf) {
-                       kvfree(hns3_dbg_cmd[i].buf);
-                       hns3_dbg_cmd[i].buf = NULL;
+               if (handle->dbgfs_buf[i]) {
+                       kvfree(handle->dbgfs_buf[i]);
+                       handle->dbgfs_buf[i] = NULL;
                }
 
        mutex_destroy(&handle->dbgfs_lock);
index 83aa145..97578ea 100644 (file)
@@ -49,7 +49,6 @@ struct hns3_dbg_cmd_info {
        enum hnae3_dbg_cmd cmd;
        enum hns3_dbg_dentry_type dentry;
        u32 buf_len;
-       char *buf;
        int (*init)(struct hnae3_handle *handle, unsigned int cmd);
 };
 
index 2a5e6a2..8cebb18 100644 (file)
@@ -10323,11 +10323,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
        }
 
        if (!ret) {
-               if (is_kill)
-                       hclge_rm_vport_vlan_table(vport, vlan_id, false);
-               else
+               if (!is_kill)
                        hclge_add_vport_vlan_table(vport, vlan_id,
                                                   writen_to_tbl);
+               else if (is_kill && vlan_id != 0)
+                       hclge_rm_vport_vlan_table(vport, vlan_id, false);
        } else if (is_kill) {
                /* when remove hw vlan filter failed, record the vlan id,
                 * and try to remove it from hw later, to be consistence
index b0b27bf..d4f1874 100644 (file)
@@ -710,7 +710,7 @@ static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
        struct ice_vsi *vsi = ring->vsi;
        u16 qid;
 
-       qid = ring->q_index - vsi->num_xdp_txq;
+       qid = ring->q_index - vsi->alloc_txq;
 
        if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
                return NULL;
index 88853a6..dfbcaf0 100644 (file)
@@ -608,6 +608,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                 */
                dma_rmb();
 
+               if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
+                       break;
+
                xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
 
                size = le16_to_cpu(rx_desc->wb.pkt_len) &
@@ -754,7 +757,7 @@ skip:
                next_dd = next_dd + tx_thresh;
                if (next_dd >= desc_cnt)
                        next_dd = tx_thresh - 1;
-       } while (budget--);
+       } while (--budget);
 
        xdp_ring->next_dd = next_dd;
 
index e1bcb28..1f8c67f 100644 (file)
@@ -408,6 +408,9 @@ static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
                }
        }
 
+       if (!dev->phydev)
+               return -ENODEV;
+
        return phy_mii_ioctl(dev->phydev, ifr, cmd);
 }
 
index 85b24ed..cc5e48e 100644 (file)
@@ -5,6 +5,7 @@ config SPARX5_SWITCH
        depends on OF
        depends on ARCH_SPARX5 || COMPILE_TEST
        depends on PTP_1588_CLOCK_OPTIONAL
+       depends on BRIDGE || BRIDGE=n
        select PHYLINK
        select PHY_SPARX5_SERDES
        select RESET_CONTROLLER
index d6fdcdc..f906453 100644 (file)
@@ -91,11 +91,9 @@ static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
        }
 
        cpumask_copy(filter_mask, cpu_online_mask);
-       if (local_node) {
-               int numa_node = pcibus_to_node(efx->pci_dev->bus);
-
-               cpumask_and(filter_mask, filter_mask, cpumask_of_node(numa_node));
-       }
+       if (local_node)
+               cpumask_and(filter_mask, filter_mask,
+                           cpumask_of_pcibus(efx->pci_dev->bus));
 
        count = 0;
        for_each_cpu(cpu, filter_mask) {
@@ -386,8 +384,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
 #if defined(CONFIG_SMP)
 void efx_set_interrupt_affinity(struct efx_nic *efx)
 {
-       int numa_node = pcibus_to_node(efx->pci_dev->bus);
-       const struct cpumask *numa_mask = cpumask_of_node(numa_node);
+       const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
        struct efx_channel *channel;
        unsigned int cpu;
 
index 11f26b0..87838cb 100644 (file)
@@ -169,6 +169,24 @@ struct receive_queue {
        struct xdp_rxq_info xdp_rxq;
 };
 
+/* This structure can contain rss message with maximum settings for indirection table and keysize
+ * Note, that default structure that describes RSS configuration virtio_net_rss_config
+ * contains same info but can't handle table values.
+ * In any case, structure would be passed to virtio hw through sg_buf split by parts
+ * because table sizes may be differ according to the device configuration.
+ */
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
+struct virtio_net_ctrl_rss {
+       u32 hash_types;
+       u16 indirection_table_mask;
+       u16 unclassified_queue;
+       u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
+       u16 max_tx_vq;
+       u8 hash_key_length;
+       u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+};
+
 /* Control VQ buffers: protected by the rtnl lock */
 struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
@@ -178,6 +196,7 @@ struct control_buf {
        u8 allmulti;
        __virtio16 vid;
        __virtio64 offloads;
+       struct virtio_net_ctrl_rss rss;
 };
 
 struct virtnet_info {
@@ -206,6 +225,14 @@ struct virtnet_info {
        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;
 
+       /* Host supports rss and/or hash report */
+       bool has_rss;
+       bool has_rss_hash_report;
+       u8 rss_key_size;
+       u16 rss_indir_table_size;
+       u32 rss_hash_types_supported;
+       u32 rss_hash_types_saved;
+
        /* Has control virtqueue */
        bool has_cvq;
 
@@ -242,13 +269,13 @@ struct virtnet_info {
 };
 
 struct padded_vnet_hdr {
-       struct virtio_net_hdr_mrg_rxbuf hdr;
+       struct virtio_net_hdr_v1_hash hdr;
        /*
         * hdr is in a separate sg buffer, and data sg buffer shares same page
         * with this header sg. This padding makes next sg 16 byte aligned
         * after the header.
         */
-       char padding[4];
+       char padding[12];
 };
 
 static bool is_xdp_frame(void *ptr)
@@ -396,7 +423,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
-               hdr_padded_len = sizeof(*hdr);
+               hdr_padded_len = hdr_len;
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
@@ -1123,6 +1150,35 @@ xdp_xmit:
        return NULL;
 }
 
+static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
+                               struct sk_buff *skb)
+{
+       enum pkt_hash_types rss_hash_type;
+
+       if (!hdr_hash || !skb)
+               return;
+
+       switch ((int)hdr_hash->hash_report) {
+       case VIRTIO_NET_HASH_REPORT_TCPv4:
+       case VIRTIO_NET_HASH_REPORT_UDPv4:
+       case VIRTIO_NET_HASH_REPORT_TCPv6:
+       case VIRTIO_NET_HASH_REPORT_UDPv6:
+       case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
+       case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
+               rss_hash_type = PKT_HASH_TYPE_L4;
+               break;
+       case VIRTIO_NET_HASH_REPORT_IPv4:
+       case VIRTIO_NET_HASH_REPORT_IPv6:
+       case VIRTIO_NET_HASH_REPORT_IPv6_EX:
+               rss_hash_type = PKT_HASH_TYPE_L3;
+               break;
+       case VIRTIO_NET_HASH_REPORT_NONE:
+       default:
+               rss_hash_type = PKT_HASH_TYPE_NONE;
+       }
+       skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+}
+
 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        void *buf, unsigned int len, void **ctx,
                        unsigned int *xdp_xmit,
@@ -1157,6 +1213,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                return;
 
        hdr = skb_vnet_hdr(skb);
+       if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+               virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
 
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1266,7 +1324,8 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
                                          struct ewma_pkt_len *avg_pkt_len,
                                          unsigned int room)
 {
-       const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+       struct virtnet_info *vi = rq->vq->vdev->priv;
+       const size_t hdr_len = vi->hdr_len;
        unsigned int len;
 
        if (room)
@@ -2183,6 +2242,174 @@ static void virtnet_get_ringparam(struct net_device *dev,
        ring->tx_pending = ring->tx_max_pending;
 }
 
+static bool virtnet_commit_rss_command(struct virtnet_info *vi)
+{
+       struct net_device *dev = vi->dev;
+       struct scatterlist sgs[4];
+       unsigned int sg_buf_size;
+
+       /* prepare sgs */
+       sg_init_table(sgs, 4);
+
+       sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
+       sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+
+       sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
+       sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+
+       sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
+                       - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
+       sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+
+       sg_buf_size = vi->rss_key_size;
+       sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+
+       if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+                                 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
+                                 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
+               dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+               return false;
+       }
+       return true;
+}
+
+static void virtnet_init_default_rss(struct virtnet_info *vi)
+{
+       u32 indir_val = 0;
+       int i = 0;
+
+       vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+       vi->rss_hash_types_saved = vi->rss_hash_types_supported;
+       vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+                                               ? vi->rss_indir_table_size - 1 : 0;
+       vi->ctrl->rss.unclassified_queue = 0;
+
+       for (; i < vi->rss_indir_table_size; ++i) {
+               indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
+               vi->ctrl->rss.indirection_table[i] = indir_val;
+       }
+
+       vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
+       vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+
+       netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+}
+
+static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+       info->data = 0;
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case TCP_V6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case UDP_V4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case UDP_V6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case IPV4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+               break;
+       case IPV6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+               break;
+       default:
+               info->data = 0;
+               break;
+       }
+}
+
+static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+       u32 new_hashtypes = vi->rss_hash_types_saved;
+       bool is_disable = info->data & RXH_DISCARD;
+       bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
+
+       /* supports only 'sd', 'sdfn' and 'r' */
+       if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
+               return false;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
+               break;
+       case UDP_V4_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
+               break;
+       case IPV4_FLOW:
+               new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+               if (!is_disable)
+                       new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+               break;
+       case TCP_V6_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
+               break;
+       case UDP_V6_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
+               break;
+       case IPV6_FLOW:
+               new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+               if (!is_disable)
+                       new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+               break;
+       default:
+               /* unsupported flow */
+               return false;
+       }
+
+       /* if unsupported hashtype was set */
+       if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
+               return false;
+
+       if (new_hashtypes != vi->rss_hash_types_saved) {
+               vi->rss_hash_types_saved = new_hashtypes;
+               vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+               if (vi->dev->features & NETIF_F_RXHASH)
+                       return virtnet_commit_rss_command(vi);
+       }
+
+       return true;
+}
 
 static void virtnet_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
@@ -2411,6 +2638,92 @@ static void virtnet_update_settings(struct virtnet_info *vi)
                vi->duplex = duplex;
 }
 
+static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
+{
+       return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
+}
+
+static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
+{
+       return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
+}
+
+static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int i;
+
+       if (indir) {
+               for (i = 0; i < vi->rss_indir_table_size; ++i)
+                       indir[i] = vi->ctrl->rss.indirection_table[i];
+       }
+
+       if (key)
+               memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       return 0;
+}
+
+static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int i;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (indir) {
+               for (i = 0; i < vi->rss_indir_table_size; ++i)
+                       vi->ctrl->rss.indirection_table[i] = indir[i];
+       }
+       if (key)
+               memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
+
+       virtnet_commit_rss_command(vi);
+
+       return 0;
+}
+
+static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int rc = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = vi->curr_queue_pairs;
+               break;
+       case ETHTOOL_GRXFH:
+               virtnet_get_hashflow(vi, info);
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       return rc;
+}
+
+static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int rc = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXFH:
+               if (!virtnet_set_hashflow(vi, info))
+                       rc = -EINVAL;
+
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       return rc;
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
        .get_drvinfo = virtnet_get_drvinfo,
@@ -2426,6 +2739,12 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_link_ksettings = virtnet_set_link_ksettings,
        .set_coalesce = virtnet_set_coalesce,
        .get_coalesce = virtnet_get_coalesce,
+       .get_rxfh_key_size = virtnet_get_rxfh_key_size,
+       .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
+       .get_rxfh = virtnet_get_rxfh,
+       .set_rxfh = virtnet_set_rxfh,
+       .get_rxnfc = virtnet_get_rxnfc,
+       .set_rxnfc = virtnet_set_rxnfc,
 };
 
 static void virtnet_freeze_down(struct virtio_device *vdev)
@@ -2678,6 +2997,16 @@ static int virtnet_set_features(struct net_device *dev,
                vi->guest_offloads = offloads;
        }
 
+       if ((dev->features ^ features) & NETIF_F_RXHASH) {
+               if (features & NETIF_F_RXHASH)
+                       vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+               else
+                       vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+
+               if (!virtnet_commit_rss_command(vi))
+                       return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -2851,7 +3180,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
  */
 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
 {
-       const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+       const unsigned int hdr_len = vi->hdr_len;
        unsigned int rq_size = virtqueue_get_vring_size(vq);
        unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
        unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
@@ -3072,6 +3401,10 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
                             "VIRTIO_NET_F_CTRL_VQ") ||
             VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
             VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
                             "VIRTIO_NET_F_CTRL_VQ"))) {
                return false;
        }
@@ -3112,13 +3445,14 @@ static int virtnet_probe(struct virtio_device *vdev)
        u16 max_queue_pairs;
        int mtu;
 
-       /* Find if host supports multiqueue virtio_net device */
-       err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
-                                  struct virtio_net_config,
-                                  max_virtqueue_pairs, &max_queue_pairs);
+       /* Find if host supports multiqueue/rss virtio_net device */
+       max_queue_pairs = 1;
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+               max_queue_pairs =
+                    virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
 
        /* We need at least 2 queue's */
-       if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+       if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
            max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
            !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
                max_queue_pairs = 1;
@@ -3206,8 +3540,33 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;
 
-       if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
-           virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+               vi->has_rss_hash_report = true;
+
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+               vi->has_rss = true;
+
+       if (vi->has_rss || vi->has_rss_hash_report) {
+               vi->rss_indir_table_size =
+                       virtio_cread16(vdev, offsetof(struct virtio_net_config,
+                               rss_max_indirection_table_length));
+               vi->rss_key_size =
+                       virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+
+               vi->rss_hash_types_supported =
+                   virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
+               vi->rss_hash_types_supported &=
+                               ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
+                                 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
+                                 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
+
+               dev->hw_features |= NETIF_F_RXHASH;
+       }
+
+       if (vi->has_rss_hash_report)
+               vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
+       else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+                virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        else
                vi->hdr_len = sizeof(struct virtio_net_hdr);
@@ -3274,6 +3633,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                }
        }
 
+       if (vi->has_rss || vi->has_rss_hash_report)
+               virtnet_init_default_rss(vi);
+
        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
@@ -3405,7 +3767,8 @@ static struct virtio_device_id id_table[] = {
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
        VIRTIO_NET_F_CTRL_MAC_ADDR, \
        VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
-       VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
+       VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
+       VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
 
 static unsigned int features[] = {
        VIRTNET_FEATURES,
index 9f28d0b..3e04af4 100644 (file)
@@ -425,6 +425,12 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
                        err = -ENODEV;
                        goto out_err;
                }
+               if (!netif_is_vxlan(dev)) {
+                       NL_SET_ERR_MSG(cb->extack,
+                                      "The device is not a vxlan device");
+                       err = -EINVAL;
+                       goto out_err;
+               }
                err = vxlan_vnifilter_dump_dev(dev, skb, cb);
                /* if the dump completed without an error we return 0 here */
                if (err != -EMSGSIZE)
index 1de413b..8084e74 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include "queueing.h"
+#include <linux/skb_array.h>
 
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
@@ -42,7 +43,7 @@ void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
        free_percpu(queue->worker);
        WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
-       ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
+       ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
 }
 
 #define NEXT(skb) ((skb)->prev)
index 6f07b94..0414d7a 100644 (file)
@@ -160,6 +160,7 @@ out:
        rcu_read_unlock_bh();
        return ret;
 #else
+       kfree_skb(skb);
        return -EAFNOSUPPORT;
 #endif
 }
@@ -241,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
                endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
                endpoint->src4.s_addr = ip_hdr(skb)->daddr;
                endpoint->src_if4 = skb->skb_iif;
-       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+       } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
                endpoint->addr6.sin6_family = AF_INET6;
                endpoint->addr6.sin6_port = udp_hdr(skb)->source;
                endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
@@ -284,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct wg_peer *peer,
                peer->endpoint.addr4 = endpoint->addr4;
                peer->endpoint.src4 = endpoint->src4;
                peer->endpoint.src_if4 = endpoint->src_if4;
-       } else if (endpoint->addr.sa_family == AF_INET6) {
+       } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) {
                peer->endpoint.addr6 = endpoint->addr6;
                peer->endpoint.src6 = endpoint->src6;
        } else {
index 347fe7a..5a29046 100644 (file)
@@ -10,12 +10,9 @@ menuconfig LIBNVDIMM
          ACPI-6-NFIT defined resources.  On platforms that define an
          NFIT, or otherwise can discover NVDIMM resources, a libnvdimm
          bus is registered to advertise PMEM (persistent memory)
-         namespaces (/dev/pmemX) and BLK (sliding mmio window(s))
-         namespaces (/dev/ndblkX.Y). A PMEM namespace refers to a
+         namespaces (/dev/pmemX). A PMEM namespace refers to a
          memory resource that may span multiple DIMMs and support DAX
-         (see CONFIG_DAX).  A BLK namespace refers to an NVDIMM control
-         region which exposes an mmio register set for windowed access
-         mode to non-volatile memory.
+         (see CONFIG_DAX).
 
 if LIBNVDIMM
 
@@ -38,19 +35,6 @@ config BLK_DEV_PMEM
 
          Say Y if you want to use an NVDIMM
 
-config ND_BLK
-       tristate "BLK: Block data window (aperture) device support"
-       default LIBNVDIMM
-       select ND_BTT if BTT
-       help
-         Support NVDIMMs, or other devices, that implement a BLK-mode
-         access capability.  BLK-mode access uses memory-mapped-i/o
-         apertures to access persistent media.
-
-         Say Y if your platform firmware emits an ACPI.NFIT table
-         (CONFIG_ACPI_NFIT), or otherwise exposes BLK-mode
-         capabilities.
-
 config ND_CLAIM
        bool
 
@@ -67,9 +51,8 @@ config BTT
          applications that rely on sector writes not being torn (a
          guarantee that typical disks provide) can continue to do so.
          The BTT manifests itself as an alternate personality for an
-         NVDIMM namespace, i.e. a namespace can be in raw mode (pmemX,
-         ndblkX.Y, etc...), or 'sectored' mode, (pmemXs, ndblkX.Ys,
-         etc...).
+         NVDIMM namespace, i.e. a namespace can be in raw mode pmemX,
+         or 'sectored' mode.
 
          Select Y if unsure
 
index 29203f3..ba0296d 100644 (file)
@@ -2,7 +2,6 @@
 obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
 obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
 obj-$(CONFIG_ND_BTT) += nd_btt.o
-obj-$(CONFIG_ND_BLK) += nd_blk.o
 obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
 obj-$(CONFIG_OF_PMEM) += of_pmem.o
 obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
@@ -11,13 +10,12 @@ nd_pmem-y := pmem.o
 
 nd_btt-y := btt.o
 
-nd_blk-y := blk.o
-
 nd_e820-y := e820.o
 
 libnvdimm-y := core.o
 libnvdimm-y += bus.o
 libnvdimm-y += dimm_devs.o
+libnvdimm-$(CONFIG_PERF_EVENTS) += nd_perf.o
 libnvdimm-y += dimm.o
 libnvdimm-y += region_devs.o
 libnvdimm-y += region.o
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
deleted file mode 100644 (file)
index 0a38738..0000000
+++ /dev/null
@@ -1,333 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * NVDIMM Block Window Driver
- * Copyright (c) 2014, Intel Corporation.
- */
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/nd.h>
-#include <linux/sizes.h>
-#include "nd.h"
-
-static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
-{
-       return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
-}
-
-static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
-{
-       return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
-}
-
-static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
-{
-       return nsblk->lbasize - nsblk_meta_size(nsblk);
-}
-
-static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
-                               resource_size_t ns_offset, unsigned int len)
-{
-       int i;
-
-       for (i = 0; i < nsblk->num_resources; i++) {
-               if (ns_offset < resource_size(nsblk->res[i])) {
-                       if (ns_offset + len > resource_size(nsblk->res[i])) {
-                               dev_WARN_ONCE(&nsblk->common.dev, 1,
-                                       "illegal request\n");
-                               return SIZE_MAX;
-                       }
-                       return nsblk->res[i]->start + ns_offset;
-               }
-               ns_offset -= resource_size(nsblk->res[i]);
-       }
-
-       dev_WARN_ONCE(&nsblk->common.dev, 1, "request out of range\n");
-       return SIZE_MAX;
-}
-
-static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
-{
-       struct nd_region *nd_region;
-       struct device *parent;
-
-       parent = nsblk->common.dev.parent;
-       nd_region = container_of(parent, struct nd_region, dev);
-       return container_of(nd_region, struct nd_blk_region, nd_region);
-}
-
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
-               struct bio_integrity_payload *bip, u64 lba, int rw)
-{
-       struct nd_blk_region *ndbr = to_ndbr(nsblk);
-       unsigned int len = nsblk_meta_size(nsblk);
-       resource_size_t dev_offset, ns_offset;
-       u32 internal_lbasize, sector_size;
-       int err = 0;
-
-       internal_lbasize = nsblk_internal_lbasize(nsblk);
-       sector_size = nsblk_sector_size(nsblk);
-       ns_offset = lba * internal_lbasize + sector_size;
-       dev_offset = to_dev_offset(nsblk, ns_offset, len);
-       if (dev_offset == SIZE_MAX)
-               return -EIO;
-
-       while (len) {
-               unsigned int cur_len;
-               struct bio_vec bv;
-               void *iobuf;
-
-               bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
-               /*
-                * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
-                * .bv_offset already adjusted for iter->bi_bvec_done, and we
-                * can use those directly
-                */
-
-               cur_len = min(len, bv.bv_len);
-               iobuf = bvec_kmap_local(&bv);
-               err = ndbr->do_io(ndbr, dev_offset, iobuf, cur_len, rw);
-               kunmap_local(iobuf);
-               if (err)
-                       return err;
-
-               len -= cur_len;
-               dev_offset += cur_len;
-               if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
-                       return -EIO;
-       }
-
-       return err;
-}
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
-               struct bio_integrity_payload *bip, u64 lba, int rw)
-{
-       return 0;
-}
-#endif
-
-static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
-               struct bio_integrity_payload *bip, struct page *page,
-               unsigned int len, unsigned int off, int rw, sector_t sector)
-{
-       struct nd_blk_region *ndbr = to_ndbr(nsblk);
-       resource_size_t dev_offset, ns_offset;
-       u32 internal_lbasize, sector_size;
-       int err = 0;
-       void *iobuf;
-       u64 lba;
-
-       internal_lbasize = nsblk_internal_lbasize(nsblk);
-       sector_size = nsblk_sector_size(nsblk);
-       while (len) {
-               unsigned int cur_len;
-
-               /*
-                * If we don't have an integrity payload, we don't have to
-                * split the bvec into sectors, as this would cause unnecessary
-                * Block Window setup/move steps. the do_io routine is capable
-                * of handling len <= PAGE_SIZE.
-                */
-               cur_len = bip ? min(len, sector_size) : len;
-
-               lba = div_u64(sector << SECTOR_SHIFT, sector_size);
-               ns_offset = lba * internal_lbasize;
-               dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
-               if (dev_offset == SIZE_MAX)
-                       return -EIO;
-
-               iobuf = kmap_atomic(page);
-               err = ndbr->do_io(ndbr, dev_offset, iobuf + off, cur_len, rw);
-               kunmap_atomic(iobuf);
-               if (err)
-                       return err;
-
-               if (bip) {
-                       err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
-                       if (err)
-                               return err;
-               }
-               len -= cur_len;
-               off += cur_len;
-               sector += sector_size >> SECTOR_SHIFT;
-       }
-
-       return err;
-}
-
-static void nd_blk_submit_bio(struct bio *bio)
-{
-       struct bio_integrity_payload *bip;
-       struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
-       struct bvec_iter iter;
-       unsigned long start;
-       struct bio_vec bvec;
-       int err = 0, rw;
-       bool do_acct;
-
-       if (!bio_integrity_prep(bio))
-               return;
-
-       bip = bio_integrity(bio);
-       rw = bio_data_dir(bio);
-       do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
-       if (do_acct)
-               start = bio_start_io_acct(bio);
-       bio_for_each_segment(bvec, bio, iter) {
-               unsigned int len = bvec.bv_len;
-
-               BUG_ON(len > PAGE_SIZE);
-               err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
-                               bvec.bv_offset, rw, iter.bi_sector);
-               if (err) {
-                       dev_dbg(&nsblk->common.dev,
-                                       "io error in %s sector %lld, len %d,\n",
-                                       (rw == READ) ? "READ" : "WRITE",
-                                       (unsigned long long) iter.bi_sector, len);
-                       bio->bi_status = errno_to_blk_status(err);
-                       break;
-               }
-       }
-       if (do_acct)
-               bio_end_io_acct(bio, start);
-
-       bio_endio(bio);
-}
-
-static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
-               resource_size_t offset, void *iobuf, size_t n, int rw,
-               unsigned long flags)
-{
-       struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
-       struct nd_blk_region *ndbr = to_ndbr(nsblk);
-       resource_size_t dev_offset;
-
-       dev_offset = to_dev_offset(nsblk, offset, n);
-
-       if (unlikely(offset + n > nsblk->size)) {
-               dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
-               return -EFAULT;
-       }
-
-       if (dev_offset == SIZE_MAX)
-               return -EIO;
-
-       return ndbr->do_io(ndbr, dev_offset, iobuf, n, rw);
-}
-
-static const struct block_device_operations nd_blk_fops = {
-       .owner = THIS_MODULE,
-       .submit_bio =  nd_blk_submit_bio,
-};
-
-static void nd_blk_release_disk(void *disk)
-{
-       del_gendisk(disk);
-       blk_cleanup_disk(disk);
-}
-
-static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
-{
-       struct device *dev = &nsblk->common.dev;
-       resource_size_t available_disk_size;
-       struct gendisk *disk;
-       u64 internal_nlba;
-       int rc;
-
-       internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
-       available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
-
-       disk = blk_alloc_disk(NUMA_NO_NODE);
-       if (!disk)
-               return -ENOMEM;
-
-       disk->fops              = &nd_blk_fops;
-       disk->private_data      = nsblk;
-       nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
-
-       blk_queue_max_hw_sectors(disk->queue, UINT_MAX);
-       blk_queue_logical_block_size(disk->queue, nsblk_sector_size(nsblk));
-       blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-
-       if (nsblk_meta_size(nsblk)) {
-               rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));
-
-               if (rc)
-                       goto out_before_devm_err;
-       }
-
-       set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
-       rc = device_add_disk(dev, disk, NULL);
-       if (rc)
-               goto out_before_devm_err;
-
-       /* nd_blk_release_disk() is called if this fails */
-       if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
-               return -ENOMEM;
-
-       nvdimm_check_and_set_ro(disk);
-       return 0;
-
-out_before_devm_err:
-       blk_cleanup_disk(disk);
-       return rc;
-}
-
-static int nd_blk_probe(struct device *dev)
-{
-       struct nd_namespace_common *ndns;
-       struct nd_namespace_blk *nsblk;
-
-       ndns = nvdimm_namespace_common_probe(dev);
-       if (IS_ERR(ndns))
-               return PTR_ERR(ndns);
-
-       nsblk = to_nd_namespace_blk(&ndns->dev);
-       nsblk->size = nvdimm_namespace_capacity(ndns);
-       dev_set_drvdata(dev, nsblk);
-
-       ndns->rw_bytes = nsblk_rw_bytes;
-       if (is_nd_btt(dev))
-               return nvdimm_namespace_attach_btt(ndns);
-       else if (nd_btt_probe(dev, ndns) == 0) {
-               /* we'll come back as btt-blk */
-               return -ENXIO;
-       } else
-               return nsblk_attach_disk(nsblk);
-}
-
-static void nd_blk_remove(struct device *dev)
-{
-       if (is_nd_btt(dev))
-               nvdimm_namespace_detach_btt(to_nd_btt(dev));
-}
-
-static struct nd_device_driver nd_blk_driver = {
-       .probe = nd_blk_probe,
-       .remove = nd_blk_remove,
-       .drv = {
-               .name = "nd_blk",
-       },
-       .type = ND_DRIVER_NAMESPACE_BLK,
-};
-
-static int __init nd_blk_init(void)
-{
-       return nd_driver_register(&nd_blk_driver);
-}
-
-static void __exit nd_blk_exit(void)
-{
-       driver_unregister(&nd_blk_driver.drv);
-}
-
-MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_BLK);
-module_init(nd_blk_init);
-module_exit(nd_blk_exit);
index 5bbe31b..7b0d144 100644 (file)
@@ -34,8 +34,6 @@ static int to_nd_device_type(struct device *dev)
                return ND_DEVICE_DIMM;
        else if (is_memory(dev))
                return ND_DEVICE_REGION_PMEM;
-       else if (is_nd_blk(dev))
-               return ND_DEVICE_REGION_BLK;
        else if (is_nd_dax(dev))
                return ND_DEVICE_DAX_PMEM;
        else if (is_nd_region(dev->parent))
index dc7449a..ee507ee 100644 (file)
 
 static DEFINE_IDA(dimm_ida);
 
-static bool noblk;
-module_param(noblk, bool, 0444);
-MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
-
 /*
  * Retrieve bus and dimm handle and return if this bus supports
  * get_config_data commands
@@ -211,22 +207,6 @@ struct nvdimm *to_nvdimm(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(to_nvdimm);
 
-struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
-{
-       struct nd_region *nd_region = &ndbr->nd_region;
-       struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-
-       return nd_mapping->nvdimm;
-}
-EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
-
-unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
-{
-       /* pmem mapping properties are private to libnvdimm */
-       return ARCH_MEMREMAP_PMEM;
-}
-EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
-
 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
 {
        struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -312,8 +292,7 @@ static ssize_t flags_show(struct device *dev,
 {
        struct nvdimm *nvdimm = to_nvdimm(dev);
 
-       return sprintf(buf, "%s%s%s\n",
-                       test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
+       return sprintf(buf, "%s%s\n",
                        test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
                        test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
 }
@@ -612,8 +591,6 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 
        nvdimm->dimm_id = dimm_id;
        nvdimm->provider_data = provider_data;
-       if (noblk)
-               flags |= 1 << NDD_NOBLK;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
@@ -726,133 +703,6 @@ static unsigned long dpa_align(struct nd_region *nd_region)
        return nd_region->align / nd_region->ndr_mappings;
 }
 
-int alias_dpa_busy(struct device *dev, void *data)
-{
-       resource_size_t map_end, blk_start, new;
-       struct blk_alloc_info *info = data;
-       struct nd_mapping *nd_mapping;
-       struct nd_region *nd_region;
-       struct nvdimm_drvdata *ndd;
-       struct resource *res;
-       unsigned long align;
-       int i;
-
-       if (!is_memory(dev))
-               return 0;
-
-       nd_region = to_nd_region(dev);
-       for (i = 0; i < nd_region->ndr_mappings; i++) {
-               nd_mapping  = &nd_region->mapping[i];
-               if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-                       break;
-       }
-
-       if (i >= nd_region->ndr_mappings)
-               return 0;
-
-       ndd = to_ndd(nd_mapping);
-       map_end = nd_mapping->start + nd_mapping->size - 1;
-       blk_start = nd_mapping->start;
-
-       /*
-        * In the allocation case ->res is set to free space that we are
-        * looking to validate against PMEM aliasing collision rules
-        * (i.e. BLK is allocated after all aliased PMEM).
-        */
-       if (info->res) {
-               if (info->res->start >= nd_mapping->start
-                               && info->res->start < map_end)
-                       /* pass */;
-               else
-                       return 0;
-       }
-
- retry:
-       /*
-        * Find the free dpa from the end of the last pmem allocation to
-        * the end of the interleave-set mapping.
-        */
-       align = dpa_align(nd_region);
-       if (!align)
-               return 0;
-
-       for_each_dpa_resource(ndd, res) {
-               resource_size_t start, end;
-
-               if (strncmp(res->name, "pmem", 4) != 0)
-                       continue;
-
-               start = ALIGN_DOWN(res->start, align);
-               end = ALIGN(res->end + 1, align) - 1;
-               if ((start >= blk_start && start < map_end)
-                               || (end >= blk_start && end <= map_end)) {
-                       new = max(blk_start, min(map_end, end) + 1);
-                       if (new != blk_start) {
-                               blk_start = new;
-                               goto retry;
-                       }
-               }
-       }
-
-       /* update the free space range with the probed blk_start */
-       if (info->res && blk_start > info->res->start) {
-               info->res->start = max(info->res->start, blk_start);
-               if (info->res->start > info->res->end)
-                       info->res->end = info->res->start - 1;
-               return 1;
-       }
-
-       info->available -= blk_start - nd_mapping->start;
-
-       return 0;
-}
-
-/**
- * nd_blk_available_dpa - account the unused dpa of BLK region
- * @nd_mapping: container of dpa-resource-root + labels
- *
- * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
- * we arrange for them to never start at an lower dpa than the last
- * PMEM allocation in an aliased region.
- */
-resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
-{
-       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
-       struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-       struct blk_alloc_info info = {
-               .nd_mapping = nd_mapping,
-               .available = nd_mapping->size,
-               .res = NULL,
-       };
-       struct resource *res;
-       unsigned long align;
-
-       if (!ndd)
-               return 0;
-
-       device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
-
-       /* now account for busy blk allocations in unaliased dpa */
-       align = dpa_align(nd_region);
-       if (!align)
-               return 0;
-       for_each_dpa_resource(ndd, res) {
-               resource_size_t start, end, size;
-
-               if (strncmp(res->name, "blk", 3) != 0)
-                       continue;
-               start = ALIGN_DOWN(res->start, align);
-               end = ALIGN(res->end + 1, align) - 1;
-               size = end - start + 1;
-               if (size >= info.available)
-                       return 0;
-               info.available -= size;
-       }
-
-       return info.available;
-}
-
 /**
  * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
  *                        contiguous unallocated dpa range.
@@ -900,24 +750,16 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
  * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
  * @nd_mapping: container of dpa-resource-root + labels
  * @nd_region: constrain available space check to this reference region
- * @overlap: calculate available space assuming this level of overlap
  *
  * Validate that a PMEM label, if present, aligns with the start of an
- * interleave set and truncate the available size at the lowest BLK
- * overlap point.
- *
- * The expectation is that this routine is called multiple times as it
- * probes for the largest BLK encroachment for any single member DIMM of
- * the interleave set.  Once that value is determined the PMEM-limit for
- * the set can be established.
+ * interleave set.
  */
 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
-               struct nd_mapping *nd_mapping, resource_size_t *overlap)
+                                     struct nd_mapping *nd_mapping)
 {
-       resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+       resource_size_t map_start, map_end, busy = 0;
        struct resource *res;
-       const char *reason;
        unsigned long align;
 
        if (!ndd)
@@ -929,46 +771,28 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
 
        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
-       blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                resource_size_t start, end;
 
                start = ALIGN_DOWN(res->start, align);
                end = ALIGN(res->end + 1, align) - 1;
                if (start >= map_start && start < map_end) {
-                       if (strncmp(res->name, "blk", 3) == 0)
-                               blk_start = min(blk_start,
-                                               max(map_start, start));
-                       else if (end > map_end) {
-                               reason = "misaligned to iset";
-                               goto err;
-                       } else
-                               busy += end - start + 1;
+                       if (end > map_end) {
+                               nd_dbg_dpa(nd_region, ndd, res,
+                                          "misaligned to iset\n");
+                               return 0;
+                       }
+                       busy += end - start + 1;
                } else if (end >= map_start && end <= map_end) {
-                       if (strncmp(res->name, "blk", 3) == 0) {
-                               /*
-                                * If a BLK allocation overlaps the start of
-                                * PMEM the entire interleave set may now only
-                                * be used for BLK.
-                                */
-                               blk_start = map_start;
-                       } else
-                               busy += end - start + 1;
+                       busy += end - start + 1;
                } else if (map_start > start && map_start < end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
-                       blk_start = map_start;
                }
        }
 
-       *overlap = map_end + 1 - blk_start;
-       available = blk_start - map_start;
-       if (busy < available)
-               return ALIGN_DOWN(available - busy, align);
-       return 0;
-
- err:
-       nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
+       if (busy < nd_mapping->size)
+               return ALIGN_DOWN(nd_mapping->size - busy, align);
        return 0;
 }
 
@@ -999,7 +823,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
 /**
  * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
  * @nvdimm: container of dpa-resource-root + labels
- * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
+ * @label_id: dpa resource name of the form pmem-<human readable uuid>
  */
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
index 5ec9a40..082253a 100644 (file)
@@ -334,8 +334,7 @@ char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
 {
        if (!label_id || !uuid)
                return NULL;
-       snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
-                       flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
+       snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid);
        return label_id->id;
 }
 
@@ -406,7 +405,6 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
                return 0; /* no label, nothing to reserve */
 
        for_each_clear_bit_le(slot, free, nslot) {
-               struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
                struct nd_namespace_label *nd_label;
                struct nd_region *nd_region = NULL;
                struct nd_label_id label_id;
@@ -421,8 +419,6 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 
                nsl_get_uuid(ndd, nd_label, &label_uuid);
                flags = nsl_get_flags(ndd, nd_label);
-               if (test_bit(NDD_NOBLK, &nvdimm->flags))
-                       flags &= ~NSLABEL_FLAG_LOCAL;
                nd_label_gen_id(&label_id, &label_uuid, flags);
                res = nvdimm_allocate_dpa(ndd, &label_id,
                                          nsl_get_dpa(ndd, nd_label),
@@ -968,326 +964,6 @@ static int __pmem_label_update(struct nd_region *nd_region,
        return rc;
 }
 
-static bool is_old_resource(struct resource *res, struct resource **list, int n)
-{
-       int i;
-
-       if (res->flags & DPA_RESOURCE_ADJUSTED)
-               return false;
-       for (i = 0; i < n; i++)
-               if (res == list[i])
-                       return true;
-       return false;
-}
-
-static struct resource *to_resource(struct nvdimm_drvdata *ndd,
-               struct nd_namespace_label *nd_label)
-{
-       struct resource *res;
-
-       for_each_dpa_resource(ndd, res) {
-               if (res->start != nsl_get_dpa(ndd, nd_label))
-                       continue;
-               if (resource_size(res) != nsl_get_rawsize(ndd, nd_label))
-                       continue;
-               return res;
-       }
-
-       return NULL;
-}
-
-/*
- * Use the presence of the type_guid as a flag to determine isetcookie
- * usage and nlabel + position policy for blk-aperture namespaces.
- */
-static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
-                                  struct nd_namespace_label *nd_label,
-                                  u64 isetcookie)
-{
-       if (efi_namespace_label_has(ndd, type_guid)) {
-               nsl_set_isetcookie(ndd, nd_label, isetcookie);
-               return;
-       }
-       nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */
-}
-
-bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
-                                struct nd_namespace_label *nd_label,
-                                u64 isetcookie)
-{
-       if (!efi_namespace_label_has(ndd, type_guid))
-               return true;
-
-       if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
-               dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie,
-                       nsl_get_isetcookie(ndd, nd_label));
-               return false;
-       }
-
-       return true;
-}
-
-static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
-                              struct nd_namespace_label *nd_label, int nlabel,
-                              bool first)
-{
-       if (!efi_namespace_label_has(ndd, type_guid)) {
-               nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
-               return;
-       }
-       nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff);
-}
-
-static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
-                                struct nd_namespace_label *nd_label,
-                                bool first)
-{
-       if (!efi_namespace_label_has(ndd, type_guid)) {
-               nsl_set_position(ndd, nd_label, 0);
-               return;
-       }
-       nsl_set_position(ndd, nd_label, first ? 0 : 0xffff);
-}
-
-/*
- * 1/ Account all the labels that can be freed after this update
- * 2/ Allocate and write the label to the staging (next) index
- * 3/ Record the resources in the namespace device
- */
-static int __blk_label_update(struct nd_region *nd_region,
-               struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
-               int num_labels)
-{
-       int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
-       struct nd_interleave_set *nd_set = nd_region->nd_set;
-       struct nd_namespace_common *ndns = &nsblk->common;
-       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-       struct nd_namespace_label *nd_label;
-       struct nd_label_ent *label_ent, *e;
-       struct nd_namespace_index *nsindex;
-       unsigned long *free, *victim_map = NULL;
-       struct resource *res, **old_res_list;
-       struct nd_label_id label_id;
-       int min_dpa_idx = 0;
-       LIST_HEAD(list);
-       u32 nslot, slot;
-
-       if (!preamble_next(ndd, &nsindex, &free, &nslot))
-               return -ENXIO;
-
-       old_res_list = nsblk->res;
-       nfree = nd_label_nfree(ndd);
-       old_num_resources = nsblk->num_resources;
-       nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
-
-       /*
-        * We need to loop over the old resources a few times, which seems a
-        * bit inefficient, but we need to know that we have the label
-        * space before we start mutating the tracking structures.
-        * Otherwise the recovery method of last resort for userspace is
-        * disable and re-enable the parent region.
-        */
-       alloc = 0;
-       for_each_dpa_resource(ndd, res) {
-               if (strcmp(res->name, label_id.id) != 0)
-                       continue;
-               if (!is_old_resource(res, old_res_list, old_num_resources))
-                       alloc++;
-       }
-
-       victims = 0;
-       if (old_num_resources) {
-               /* convert old local-label-map to dimm-slot victim-map */
-               victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
-               if (!victim_map)
-                       return -ENOMEM;
-
-               /* mark unused labels for garbage collection */
-               for_each_clear_bit_le(slot, free, nslot) {
-                       nd_label = to_label(ndd, slot);
-                       if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
-                               continue;
-                       res = to_resource(ndd, nd_label);
-                       if (res && is_old_resource(res, old_res_list,
-                                               old_num_resources))
-                               continue;
-                       slot = to_slot(ndd, nd_label);
-                       set_bit(slot, victim_map);
-                       victims++;
-               }
-       }
-
-       /* don't allow updates that consume the last label */
-       if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
-               dev_info(&nsblk->common.dev, "insufficient label space\n");
-               bitmap_free(victim_map);
-               return -ENOSPC;
-       }
-       /* from here on we need to abort on error */
-
-
-       /* assign all resources to the namespace before writing the labels */
-       nsblk->res = NULL;
-       nsblk->num_resources = 0;
-       for_each_dpa_resource(ndd, res) {
-               if (strcmp(res->name, label_id.id) != 0)
-                       continue;
-               if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
-                       rc = -ENOMEM;
-                       goto abort;
-               }
-       }
-
-       /* release slots associated with any invalidated UUIDs */
-       mutex_lock(&nd_mapping->lock);
-       list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
-               if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
-                       reap_victim(nd_mapping, label_ent);
-                       list_move(&label_ent->list, &list);
-               }
-       mutex_unlock(&nd_mapping->lock);
-
-       /*
-        * Find the resource associated with the first label in the set
-        * per the v1.2 namespace specification.
-        */
-       for (i = 0; i < nsblk->num_resources; i++) {
-               struct resource *min = nsblk->res[min_dpa_idx];
-
-               res = nsblk->res[i];
-               if (res->start < min->start)
-                       min_dpa_idx = i;
-       }
-
-       for (i = 0; i < nsblk->num_resources; i++) {
-               size_t offset;
-
-               res = nsblk->res[i];
-               if (is_old_resource(res, old_res_list, old_num_resources))
-                       continue; /* carry-over */
-               slot = nd_label_alloc_slot(ndd);
-               if (slot == UINT_MAX) {
-                       rc = -ENXIO;
-                       goto abort;
-               }
-               dev_dbg(ndd->dev, "allocated: %d\n", slot);
-
-               nd_label = to_label(ndd, slot);
-               memset(nd_label, 0, sizeof_namespace_label(ndd));
-               nsl_set_uuid(ndd, nd_label, nsblk->uuid);
-               nsl_set_name(ndd, nd_label, nsblk->alt_name);
-               nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);
-
-               nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources,
-                                  i == min_dpa_idx);
-               nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx);
-               nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2);
-
-               nsl_set_dpa(ndd, nd_label, res->start);
-               nsl_set_rawsize(ndd, nd_label, resource_size(res));
-               nsl_set_lbasize(ndd, nd_label, nsblk->lbasize);
-               nsl_set_slot(ndd, nd_label, slot);
-               nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
-               nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
-               nsl_calculate_checksum(ndd, nd_label);
-
-               /* update label */
-               offset = nd_label_offset(ndd, nd_label);
-               rc = nvdimm_set_config_data(ndd, offset, nd_label,
-                               sizeof_namespace_label(ndd));
-               if (rc < 0)
-                       goto abort;
-       }
-
-       /* free up now unused slots in the new index */
-       for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
-               dev_dbg(ndd->dev, "free: %d\n", slot);
-               nd_label_free_slot(ndd, slot);
-       }
-
-       /* update index */
-       rc = nd_label_write_index(ndd, ndd->ns_next,
-                       nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
-       if (rc)
-               goto abort;
-
-       /*
-        * Now that the on-dimm labels are up to date, fix up the tracking
-        * entries in nd_mapping->labels
-        */
-       nlabel = 0;
-       mutex_lock(&nd_mapping->lock);
-       list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
-               nd_label = label_ent->label;
-               if (!nd_label)
-                       continue;
-               nlabel++;
-               if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
-                       continue;
-               nlabel--;
-               list_move(&label_ent->list, &list);
-               label_ent->label = NULL;
-       }
-       list_splice_tail_init(&list, &nd_mapping->labels);
-       mutex_unlock(&nd_mapping->lock);
-
-       if (nlabel + nsblk->num_resources > num_labels) {
-               /*
-                * Bug, we can't end up with more resources than
-                * available labels
-                */
-               WARN_ON_ONCE(1);
-               rc = -ENXIO;
-               goto out;
-       }
-
-       mutex_lock(&nd_mapping->lock);
-       label_ent = list_first_entry_or_null(&nd_mapping->labels,
-                       typeof(*label_ent), list);
-       if (!label_ent) {
-               WARN_ON(1);
-               mutex_unlock(&nd_mapping->lock);
-               rc = -ENXIO;
-               goto out;
-       }
-       for_each_clear_bit_le(slot, free, nslot) {
-               nd_label = to_label(ndd, slot);
-               if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
-                       continue;
-               res = to_resource(ndd, nd_label);
-               res->flags &= ~DPA_RESOURCE_ADJUSTED;
-               dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
-               list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
-                       if (label_ent->label)
-                               continue;
-                       label_ent->label = nd_label;
-                       nd_label = NULL;
-                       break;
-               }
-               if (nd_label)
-                       dev_WARN(&nsblk->common.dev,
-                                       "failed to track label slot%d\n", slot);
-       }
-       mutex_unlock(&nd_mapping->lock);
-
- out:
-       kfree(old_res_list);
-       bitmap_free(victim_map);
-       return rc;
-
- abort:
-       /*
-        * 1/ repair the allocated label bitmap in the index
-        * 2/ restore the resource list
-        */
-       nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
-       kfree(nsblk->res);
-       nsblk->res = old_res_list;
-       nsblk->num_resources = old_num_resources;
-       old_res_list = NULL;
-       goto out;
-}
-
 static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
 {
        int i, old_num_labels = 0;
@@ -1425,26 +1101,6 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
        return 0;
 }
 
-int nd_blk_namespace_label_update(struct nd_region *nd_region,
-               struct nd_namespace_blk *nsblk, resource_size_t size)
-{
-       struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-       struct resource *res;
-       int count = 0;
-
-       if (size == 0)
-               return del_labels(nd_mapping, nsblk->uuid);
-
-       for_each_dpa_resource(to_ndd(nd_mapping), res)
-               count++;
-
-       count = init_labels(nd_mapping, count);
-       if (count < 0)
-               return count;
-
-       return __blk_label_update(nd_region, nd_mapping, nsblk, count);
-}
-
 int __init nd_label_init(void)
 {
        WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
index 8ee248f..0650fb4 100644 (file)
@@ -193,7 +193,7 @@ struct nd_namespace_label {
 
 /**
  * struct nd_label_id - identifier string for dpa allocation
- * @id: "{blk|pmem}-<namespace uuid>"
+ * @id: "pmem-<namespace uuid>"
  */
 struct nd_label_id {
        char id[ND_LABEL_ID_SIZE];
@@ -221,9 +221,6 @@ bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
 u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
 struct nd_region;
 struct nd_namespace_pmem;
-struct nd_namespace_blk;
 int nd_pmem_namespace_label_update(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm, resource_size_t size);
-int nd_blk_namespace_label_update(struct nd_region *nd_region,
-               struct nd_namespace_blk *nsblk, resource_size_t size);
 #endif /* __LABEL_H__ */
index b57a2d3..62b83b2 100644 (file)
@@ -32,21 +32,7 @@ static void namespace_pmem_release(struct device *dev)
        kfree(nspm);
 }
 
-static void namespace_blk_release(struct device *dev)
-{
-       struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-       struct nd_region *nd_region = to_nd_region(dev->parent);
-
-       if (nsblk->id >= 0)
-               ida_simple_remove(&nd_region->ns_ida, nsblk->id);
-       kfree(nsblk->alt_name);
-       kfree(nsblk->uuid);
-       kfree(nsblk->res);
-       kfree(nsblk);
-}
-
 static bool is_namespace_pmem(const struct device *dev);
-static bool is_namespace_blk(const struct device *dev);
 static bool is_namespace_io(const struct device *dev);
 
 static int is_uuid_busy(struct device *dev, void *data)
@@ -57,10 +43,6 @@ static int is_uuid_busy(struct device *dev, void *data)
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                uuid2 = nspm->uuid;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               uuid2 = nsblk->uuid;
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);
 
@@ -178,12 +160,6 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                else
                        sprintf(name, "pmem%d%s", nd_region->id,
                                        suffix ? suffix : "");
-       } else if (is_namespace_blk(&ndns->dev)) {
-               struct nd_namespace_blk *nsblk;
-
-               nsblk = to_nd_namespace_blk(&ndns->dev);
-               sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
-                               suffix ? suffix : "");
        } else {
                return NULL;
        }
@@ -201,10 +177,6 @@ const uuid_t *nd_dev_to_uuid(struct device *dev)
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                return nspm->uuid;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               return nsblk->uuid;
        } else
                return &uuid_null;
 }
@@ -229,10 +201,6 @@ static ssize_t __alt_name_store(struct device *dev, const char *buf,
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                ns_altname = &nspm->alt_name;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               ns_altname = &nsblk->alt_name;
        } else
                return -ENXIO;
 
@@ -264,83 +232,6 @@ out:
        return rc;
 }
 
-static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
-{
-       struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
-       struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-       struct nd_label_id label_id;
-       resource_size_t size = 0;
-       struct resource *res;
-
-       if (!nsblk->uuid)
-               return 0;
-       nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
-       for_each_dpa_resource(ndd, res)
-               if (strcmp(res->name, label_id.id) == 0)
-                       size += resource_size(res);
-       return size;
-}
-
-static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
-{
-       struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
-       struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-       struct nd_label_id label_id;
-       struct resource *res;
-       int count, i;
-
-       if (!nsblk->uuid || !nsblk->lbasize || !ndd)
-               return false;
-
-       count = 0;
-       nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
-       for_each_dpa_resource(ndd, res) {
-               if (strcmp(res->name, label_id.id) != 0)
-                       continue;
-               /*
-                * Resources with unacknowledged adjustments indicate a
-                * failure to update labels
-                */
-               if (res->flags & DPA_RESOURCE_ADJUSTED)
-                       return false;
-               count++;
-       }
-
-       /* These values match after a successful label update */
-       if (count != nsblk->num_resources)
-               return false;
-
-       for (i = 0; i < nsblk->num_resources; i++) {
-               struct resource *found = NULL;
-
-               for_each_dpa_resource(ndd, res)
-                       if (res == nsblk->res[i]) {
-                               found = res;
-                               break;
-                       }
-               /* stale resource */
-               if (!found)
-                       return false;
-       }
-
-       return true;
-}
-
-resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
-{
-       resource_size_t size;
-
-       nvdimm_bus_lock(&nsblk->common.dev);
-       size = __nd_namespace_blk_validate(nsblk);
-       nvdimm_bus_unlock(&nsblk->common.dev);
-
-       return size;
-}
-EXPORT_SYMBOL(nd_namespace_blk_validate);
-
-
 static int nd_namespace_label_update(struct nd_region *nd_region,
                struct device *dev)
 {
@@ -363,16 +254,6 @@ static int nd_namespace_label_update(struct nd_region *nd_region,
                        return 0;
 
                return nd_pmem_namespace_label_update(nd_region, nspm, size);
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-               resource_size_t size = nd_namespace_blk_size(nsblk);
-
-               if (size == 0 && nsblk->uuid)
-                       /* delete allocation */;
-               else if (!nsblk->uuid || !nsblk->lbasize)
-                       return 0;
-
-               return nd_blk_namespace_label_update(nd_region, nsblk, size);
        } else
                return -ENXIO;
 }
@@ -405,10 +286,6 @@ static ssize_t alt_name_show(struct device *dev,
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                ns_altname = nspm->alt_name;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               ns_altname = nsblk->alt_name;
        } else
                return -ENXIO;
 
@@ -420,13 +297,11 @@ static int scan_free(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
 {
-       bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int rc = 0;
 
        while (n) {
                struct resource *res, *last;
-               resource_size_t new_start;
 
                last = NULL;
                for_each_dpa_resource(ndd, res)
@@ -444,16 +319,7 @@ static int scan_free(struct nd_region *nd_region,
                        continue;
                }
 
-               /*
-                * Keep BLK allocations relegated to high DPA as much as
-                * possible
-                */
-               if (is_blk)
-                       new_start = res->start + n;
-               else
-                       new_start = res->start;
-
-               rc = adjust_resource(res, new_start, resource_size(res) - n);
+               rc = adjust_resource(res, res->start, resource_size(res) - n);
                if (rc == 0)
                        res->flags |= DPA_RESOURCE_ADJUSTED;
                nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
@@ -495,20 +361,12 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
                struct nd_region *nd_region, struct nd_mapping *nd_mapping,
                resource_size_t n)
 {
-       bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-       resource_size_t first_dpa;
        struct resource *res;
        int rc = 0;
 
-       /* allocate blk from highest dpa first */
-       if (is_blk)
-               first_dpa = nd_mapping->start + nd_mapping->size - n;
-       else
-               first_dpa = nd_mapping->start;
-
        /* first resource allocation for this label-id or dimm */
-       res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
+       res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
        if (!res)
                rc = -EBUSY;
 
@@ -539,7 +397,6 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
                resource_size_t n, struct resource *valid)
 {
        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
-       bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        unsigned long align;
 
        align = nd_region->align / nd_region->ndr_mappings;
@@ -552,21 +409,6 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
        if (is_reserve)
                return;
 
-       if (!is_pmem) {
-               struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-               struct nvdimm_bus *nvdimm_bus;
-               struct blk_alloc_info info = {
-                       .nd_mapping = nd_mapping,
-                       .available = nd_mapping->size,
-                       .res = valid,
-               };
-
-               WARN_ON(!is_nd_blk(&nd_region->dev));
-               nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
-               device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
-               return;
-       }
-
        /* allocation needs to be contiguous, so this is all or nothing */
        if (resource_size(valid) < n)
                goto invalid;
@@ -594,7 +436,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
                resource_size_t n)
 {
        resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
-       bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *exist = NULL, valid;
        const resource_size_t to_allocate = n;
@@ -692,10 +533,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
                }
 
                if (strcmp(action, "allocate") == 0) {
-                       /* BLK allocate bottom up */
-                       if (!is_pmem)
-                               valid.start += available - allocate;
-
                        new_res = nvdimm_allocate_dpa(ndd, label_id,
                                        valid.start, allocate);
                        if (!new_res)
@@ -731,12 +568,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
                        return 0;
        }
 
-       /*
-        * If we allocated nothing in the BLK case it may be because we are in
-        * an initial "pmem-reserve pass".  Only do an initial BLK allocation
-        * when none of the DPA space is reserved.
-        */
-       if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
+       if (n == to_allocate)
                return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
        return n;
 }
@@ -795,7 +627,7 @@ int __reserve_free_pmem(struct device *dev, void *data)
                if (nd_mapping->nvdimm != nvdimm)
                        continue;
 
-               n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
+               n = nd_pmem_available_dpa(nd_region, nd_mapping);
                if (n == 0)
                        return 0;
                rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
@@ -820,19 +652,6 @@ void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                        nvdimm_free_dpa(ndd, res);
 }
 
-static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
-               struct nd_mapping *nd_mapping)
-{
-       struct nvdimm *nvdimm = nd_mapping->nvdimm;
-       int rc;
-
-       rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
-                       __reserve_free_pmem);
-       if (rc)
-               release_free_pmem(nvdimm_bus, nd_mapping);
-       return rc;
-}
-
 /**
  * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
  * @nd_region: the set of dimms to allocate @n more bytes from
@@ -849,37 +668,14 @@ static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
 static int grow_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
 {
-       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
-       bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        int i;
 
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t rem = n;
-               int rc, j;
-
-               /*
-                * In the BLK case try once with all unallocated PMEM
-                * reserved, and once without
-                */
-               for (j = is_pmem; j < 2; j++) {
-                       bool blk_only = j == 0;
-
-                       if (blk_only) {
-                               rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
-                               if (rc)
-                                       return rc;
-                       }
-                       rem = scan_allocate(nd_region, nd_mapping,
-                                       label_id, rem);
-                       if (blk_only)
-                               release_free_pmem(nvdimm_bus, nd_mapping);
-
-                       /* try again and allow encroachments into PMEM */
-                       if (rem == 0)
-                               break;
-               }
+               int rc;
 
+               rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "allocation underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
@@ -966,12 +762,6 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
                uuid = nspm->uuid;
                id = nspm->id;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               uuid = nsblk->uuid;
-               flags = NSLABEL_FLAG_LOCAL;
-               id = nsblk->id;
        }
 
        /*
@@ -998,8 +788,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
                ndd = to_ndd(nd_mapping);
 
                /*
-                * All dimms in an interleave set, or the base dimm for a blk
-                * region, need to be enabled for the size to be changed.
+                * All dimms in an interleave set, need to be enabled
+                * for the size to be changed.
                 */
                if (!ndd)
                        return -ENXIO;
@@ -1067,10 +857,6 @@ static ssize_t size_store(struct device *dev,
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                uuid = &nspm->uuid;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               uuid = &nsblk->uuid;
        }
 
        if (rc == 0 && val == 0 && uuid) {
@@ -1095,8 +881,6 @@ resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                return resource_size(&nspm->nsio.res);
-       } else if (is_namespace_blk(dev)) {
-               return nd_namespace_blk_size(to_nd_namespace_blk(dev));
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 
@@ -1152,12 +936,8 @@ static uuid_t *namespace_to_uuid(struct device *dev)
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                return nspm->uuid;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               return nsblk->uuid;
-       } else
-               return ERR_PTR(-ENXIO);
+       }
+       return ERR_PTR(-ENXIO);
 }
 
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
@@ -1183,7 +963,6 @@ static int namespace_update_uuid(struct nd_region *nd_region,
                                 struct device *dev, uuid_t *new_uuid,
                                 uuid_t **old_uuid)
 {
-       u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
        struct nd_label_id old_label_id;
        struct nd_label_id new_label_id;
        int i;
@@ -1214,8 +993,8 @@ static int namespace_update_uuid(struct nd_region *nd_region,
                        return -EBUSY;
        }
 
-       nd_label_gen_id(&old_label_id, *old_uuid, flags);
-       nd_label_gen_id(&new_label_id, new_uuid, flags);
+       nd_label_gen_id(&old_label_id, *old_uuid, 0);
+       nd_label_gen_id(&new_label_id, new_uuid, 0);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -1261,10 +1040,6 @@ static ssize_t uuid_store(struct device *dev,
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                ns_uuid = &nspm->uuid;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               ns_uuid = &nsblk->uuid;
        } else
                return -ENXIO;
 
@@ -1313,21 +1088,11 @@ static ssize_t resource_show(struct device *dev,
 }
 static DEVICE_ATTR_ADMIN_RO(resource);
 
-static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
-       4096, 4104, 4160, 4224, 0 };
-
 static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
 
 static ssize_t sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               return nd_size_select_show(nsblk->lbasize,
-                               blk_lbasize_supported, buf);
-       }
-
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
@@ -1345,12 +1110,7 @@ static ssize_t sector_size_store(struct device *dev,
        unsigned long *lbasize;
        ssize_t rc = 0;
 
-       if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               lbasize = &nsblk->lbasize;
-               supported = blk_lbasize_supported;
-       } else if (is_namespace_pmem(dev)) {
+       if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                lbasize = &nspm->lbasize;
@@ -1390,11 +1150,6 @@ static ssize_t dpa_extents_show(struct device *dev,
 
                uuid = nspm->uuid;
                flags = 0;
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               uuid = nsblk->uuid;
-               flags = NSLABEL_FLAG_LOCAL;
        }
 
        if (!uuid)
@@ -1627,10 +1382,7 @@ static umode_t namespace_visible(struct kobject *kobj,
 {
        struct device *dev = container_of(kobj, struct device, kobj);
 
-       if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
-               return 0;
-
-       if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
+       if (is_namespace_pmem(dev)) {
                if (a == &dev_attr_size.attr)
                        return 0644;
 
@@ -1671,22 +1423,11 @@ static const struct device_type namespace_pmem_device_type = {
        .groups = nd_namespace_attribute_groups,
 };
 
-static const struct device_type namespace_blk_device_type = {
-       .name = "nd_namespace_blk",
-       .release = namespace_blk_release,
-       .groups = nd_namespace_attribute_groups,
-};
-
 static bool is_namespace_pmem(const struct device *dev)
 {
        return dev ? dev->type == &namespace_pmem_device_type : false;
 }
 
-static bool is_namespace_blk(const struct device *dev)
-{
-       return dev ? dev->type == &namespace_blk_device_type : false;
-}
-
 static bool is_namespace_io(const struct device *dev)
 {
        return dev ? dev->type == &namespace_io_device_type : false;
@@ -1769,18 +1510,6 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
-       } else if (is_namespace_blk(&ndns->dev)) {
-               struct nd_namespace_blk *nsblk;
-
-               nsblk = to_nd_namespace_blk(&ndns->dev);
-               if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
-                       return ERR_PTR(-ENODEV);
-               if (!nsblk->lbasize) {
-                       dev_dbg(&ndns->dev, "sector size not set\n");
-                       return ERR_PTR(-ENODEV);
-               }
-               if (!nd_namespace_blk_validate(nsblk))
-                       return ERR_PTR(-ENODEV);
        }
 
        return ndns;
@@ -1790,16 +1519,12 @@ EXPORT_SYMBOL(nvdimm_namespace_common_probe);
 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
                resource_size_t size)
 {
-       if (is_namespace_blk(&ndns->dev))
-               return 0;
        return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
 }
 EXPORT_SYMBOL_GPL(devm_namespace_enable);
 
 void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
 {
-       if (is_namespace_blk(&ndns->dev))
-               return;
        devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
 }
 EXPORT_SYMBOL_GPL(devm_namespace_disable);
@@ -2014,10 +1739,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
        /*
         * Fix up each mapping's 'labels' to have the validated pmem label for
         * that position at labels[0], and NULL at labels[1].  In the process,
-        * check that the namespace aligns with interleave-set.  We know
-        * that it does not overlap with any blk namespaces by virtue of
-        * the dimm being enabled (i.e. nd_label_reserve_dpa()
-        * succeeded).
+        * check that the namespace aligns with interleave-set.
         */
        nsl_get_uuid(ndd, nd_label, &uuid);
        rc = select_pmem_id(nd_region, &uuid);
@@ -2077,54 +1799,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
        return ERR_PTR(rc);
 }
 
-struct resource *nsblk_add_resource(struct nd_region *nd_region,
-               struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
-               resource_size_t start)
-{
-       struct nd_label_id label_id;
-       struct resource *res;
-
-       nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
-       res = krealloc(nsblk->res,
-                       sizeof(void *) * (nsblk->num_resources + 1),
-                       GFP_KERNEL);
-       if (!res)
-               return NULL;
-       nsblk->res = (struct resource **) res;
-       for_each_dpa_resource(ndd, res)
-               if (strcmp(res->name, label_id.id) == 0
-                               && res->start == start) {
-                       nsblk->res[nsblk->num_resources++] = res;
-                       return res;
-               }
-       return NULL;
-}
-
-static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
-{
-       struct nd_namespace_blk *nsblk;
-       struct device *dev;
-
-       if (!is_nd_blk(&nd_region->dev))
-               return NULL;
-
-       nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
-       if (!nsblk)
-               return NULL;
-
-       dev = &nsblk->common.dev;
-       dev->type = &namespace_blk_device_type;
-       nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
-       if (nsblk->id < 0) {
-               kfree(nsblk);
-               return NULL;
-       }
-       dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
-       dev->parent = &nd_region->dev;
-
-       return &nsblk->common.dev;
-}
-
 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
 {
        struct nd_namespace_pmem *nspm;
@@ -2163,18 +1837,14 @@ void nd_region_create_ns_seed(struct nd_region *nd_region)
        if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
                return;
 
-       if (is_nd_blk(&nd_region->dev))
-               nd_region->ns_seed = nd_namespace_blk_create(nd_region);
-       else
-               nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
+       nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
 
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->ns_seed)
-               dev_err(&nd_region->dev, "failed to create %s namespace\n",
-                               is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
+               dev_err(&nd_region->dev, "failed to create namespace\n");
        else
                nd_device_register(nd_region->ns_seed);
 }
@@ -2225,7 +1895,6 @@ static int add_namespace_resource(struct nd_region *nd_region,
 
        for (i = 0; i < count; i++) {
                uuid_t *uuid = namespace_to_uuid(devs[i]);
-               struct resource *res;
 
                if (IS_ERR(uuid)) {
                        WARN_ON(1);
@@ -2234,91 +1903,23 @@ static int add_namespace_resource(struct nd_region *nd_region,
 
                if (!nsl_uuid_equal(ndd, nd_label, uuid))
                        continue;
-               if (is_namespace_blk(devs[i])) {
-                       res = nsblk_add_resource(nd_region, ndd,
-                                       to_nd_namespace_blk(devs[i]),
-                                       nsl_get_dpa(ndd, nd_label));
-                       if (!res)
-                               return -ENXIO;
-                       nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
-               } else {
-                       dev_err(&nd_region->dev,
-                               "error: conflicting extents for uuid: %pUb\n",
-                               uuid);
-                       return -ENXIO;
-               }
-               break;
+               dev_err(&nd_region->dev,
+                       "error: conflicting extents for uuid: %pUb\n", uuid);
+               return -ENXIO;
        }
 
        return i;
 }
 
-static struct device *create_namespace_blk(struct nd_region *nd_region,
-               struct nd_namespace_label *nd_label, int count)
-{
-
-       struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-       struct nd_interleave_set *nd_set = nd_region->nd_set;
-       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-       struct nd_namespace_blk *nsblk;
-       char name[NSLABEL_NAME_LEN];
-       struct device *dev = NULL;
-       struct resource *res;
-       uuid_t uuid;
-
-       if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
-               return ERR_PTR(-EAGAIN);
-       if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
-               return ERR_PTR(-EAGAIN);
-
-       nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
-       if (!nsblk)
-               return ERR_PTR(-ENOMEM);
-       dev = &nsblk->common.dev;
-       dev->type = &namespace_blk_device_type;
-       dev->parent = &nd_region->dev;
-       nsblk->id = -1;
-       nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
-       nsl_get_uuid(ndd, nd_label, &uuid);
-       nsblk->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
-       nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
-       if (!nsblk->uuid)
-               goto blk_err;
-       nsl_get_name(ndd, nd_label, name);
-       if (name[0]) {
-               nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
-               if (!nsblk->alt_name)
-                       goto blk_err;
-       }
-       res = nsblk_add_resource(nd_region, ndd, nsblk,
-                       nsl_get_dpa(ndd, nd_label));
-       if (!res)
-               goto blk_err;
-       nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
-       return dev;
- blk_err:
-       namespace_blk_release(dev);
-       return ERR_PTR(-ENXIO);
-}
-
 static int cmp_dpa(const void *a, const void *b)
 {
        const struct device *dev_a = *(const struct device **) a;
        const struct device *dev_b = *(const struct device **) b;
-       struct nd_namespace_blk *nsblk_a, *nsblk_b;
        struct nd_namespace_pmem *nspm_a, *nspm_b;
 
        if (is_namespace_io(dev_a))
                return 0;
 
-       if (is_namespace_blk(dev_a)) {
-               nsblk_a = to_nd_namespace_blk(dev_a);
-               nsblk_b = to_nd_namespace_blk(dev_b);
-
-               return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
-                               sizeof(resource_size_t));
-       }
-
        nspm_a = to_nd_namespace_pmem(dev_a);
        nspm_b = to_nd_namespace_pmem(dev_b);
 
@@ -2339,16 +1940,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                struct nd_namespace_label *nd_label = label_ent->label;
                struct device **__devs;
-               u32 flags;
 
                if (!nd_label)
                        continue;
-               flags = nsl_get_flags(ndd, nd_label);
-               if (is_nd_blk(&nd_region->dev)
-                               == !!(flags & NSLABEL_FLAG_LOCAL))
-                       /* pass, region matches label type */;
-               else
-                       continue;
 
                /* skip labels that describe extents outside of the region */
                if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
@@ -2367,12 +1961,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
                kfree(devs);
                devs = __devs;
 
-               if (is_nd_blk(&nd_region->dev))
-                       dev = create_namespace_blk(nd_region, nd_label, count);
-               else
-                       dev = create_namespace_pmem(nd_region, nd_mapping,
-                                                   nd_label);
-
+               dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
                if (IS_ERR(dev)) {
                        switch (PTR_ERR(dev)) {
                        case -EAGAIN:
@@ -2389,35 +1978,25 @@ static struct device **scan_labels(struct nd_region *nd_region)
 
        }
 
-       dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
-                       count, is_nd_blk(&nd_region->dev)
-                       ? "blk" : "pmem", count == 1 ? "" : "s");
+       dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
+               count == 1 ? "" : "s");
 
        if (count == 0) {
+               struct nd_namespace_pmem *nspm;
+
                /* Publish a zero-sized namespace for userspace to configure. */
                nd_mapping_free_labels(nd_mapping);
 
                devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
                if (!devs)
                        goto err;
-               if (is_nd_blk(&nd_region->dev)) {
-                       struct nd_namespace_blk *nsblk;
 
-                       nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
-                       if (!nsblk)
-                               goto err;
-                       dev = &nsblk->common.dev;
-                       dev->type = &namespace_blk_device_type;
-               } else {
-                       struct nd_namespace_pmem *nspm;
-
-                       nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
-                       if (!nspm)
-                               goto err;
-                       dev = &nspm->nsio.common.dev;
-                       dev->type = &namespace_pmem_device_type;
-                       nd_namespace_pmem_set_resource(nd_region, nspm, 0);
-               }
+               nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+               if (!nspm)
+                       goto err;
+               dev = &nspm->nsio.common.dev;
+               dev->type = &namespace_pmem_device_type;
+               nd_namespace_pmem_set_resource(nd_region, nspm, 0);
                dev->parent = &nd_region->dev;
                devs[count++] = dev;
        } else if (is_memory(&nd_region->dev)) {
@@ -2452,10 +2031,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
  err:
        if (devs) {
                for (i = 0; devs[i]; i++)
-                       if (is_nd_blk(&nd_region->dev))
-                               namespace_blk_release(devs[i]);
-                       else
-                               namespace_pmem_release(devs[i]);
+                       namespace_pmem_release(devs[i]);
                kfree(devs);
        }
        return NULL;
@@ -2554,12 +2130,6 @@ static int init_active_labels(struct nd_region *nd_region)
                        if (!label_ent)
                                break;
                        label = nd_label_active(ndd, j);
-                       if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
-                               u32 flags = nsl_get_flags(ndd, label);
-
-                               flags &= ~NSLABEL_FLAG_LOCAL;
-                               nsl_set_flags(ndd, label, flags);
-                       }
                        label_ent->label = label;
 
                        mutex_lock(&nd_mapping->lock);
@@ -2603,7 +2173,6 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
                devs = create_namespace_io(nd_region);
                break;
        case ND_DEVICE_NAMESPACE_PMEM:
-       case ND_DEVICE_NAMESPACE_BLK:
                devs = create_namespaces(nd_region);
                break;
        default:
@@ -2618,19 +2187,12 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
                struct device *dev = devs[i];
                int id;
 
-               if (type == ND_DEVICE_NAMESPACE_BLK) {
-                       struct nd_namespace_blk *nsblk;
-
-                       nsblk = to_nd_namespace_blk(dev);
-                       id = ida_simple_get(&nd_region->ns_ida, 0, 0,
-                                       GFP_KERNEL);
-                       nsblk->id = id;
-               } else if (type == ND_DEVICE_NAMESPACE_PMEM) {
+               if (type == ND_DEVICE_NAMESPACE_PMEM) {
                        struct nd_namespace_pmem *nspm;
 
                        nspm = to_nd_namespace_pmem(dev);
                        id = ida_simple_get(&nd_region->ns_ida, 0, 0,
-                                       GFP_KERNEL);
+                                           GFP_KERNEL);
                        nspm->id = id;
                } else
                        id = i;
index 2650a85..448f9dc 100644 (file)
@@ -82,30 +82,12 @@ static inline void nvdimm_security_overwrite_query(struct work_struct *work)
 }
 #endif
 
-/**
- * struct blk_alloc_info - tracking info for BLK dpa scanning
- * @nd_mapping: blk region mapping boundaries
- * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
- * @busy: decremented in blk_dpa_busy to account for ranges already
- *       handled by alias_dpa_busy
- * @res: alias_dpa_busy interprets this a free space range that needs to
- *      be truncated to the valid BLK allocation starting DPA, blk_dpa_busy
- *      treats it as a busy range that needs the aliased PMEM ranges
- *      truncated.
- */
-struct blk_alloc_info {
-       struct nd_mapping *nd_mapping;
-       resource_size_t available, busy;
-       struct resource *res;
-};
-
 bool is_nvdimm(struct device *dev);
 bool is_nd_pmem(struct device *dev);
 bool is_nd_volatile(struct device *dev);
-bool is_nd_blk(struct device *dev);
 static inline bool is_nd_region(struct device *dev)
 {
-       return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
+       return is_nd_pmem(dev) || is_nd_volatile(dev);
 }
 static inline bool is_memory(struct device *dev)
 {
@@ -142,17 +124,12 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
                                           struct nd_mapping *nd_mapping);
 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
-               struct nd_mapping *nd_mapping, resource_size_t *overlap);
-resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
+                                     struct nd_mapping *nd_mapping);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
 int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
                resource_size_t size);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id);
-int alias_dpa_busy(struct device *dev, void *data);
-struct resource *nsblk_add_resource(struct nd_region *nd_region,
-               struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
-               resource_size_t start);
 int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
 void get_ndd(struct nvdimm_drvdata *ndd);
 resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
index 6f8ce11..ec52196 100644 (file)
@@ -295,9 +295,6 @@ static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd,
        return nd_label->efi.uuid;
 }
 
-bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
-                                struct nd_namespace_label *nd_label,
-                                u64 isetcookie);
 bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
                            struct nd_namespace_label *nd_label, guid_t *guid);
 enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
@@ -437,14 +434,6 @@ static inline bool nsl_validate_nlabel(struct nd_region *nd_region,
        return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings;
 }
 
-struct nd_blk_region {
-       int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
-       int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
-                       void *iobuf, u64 len, int rw);
-       void *blk_provider_data;
-       struct nd_region nd_region;
-};
-
 /*
  * Lookup next in the repeating sequence of 01, 10, and 11.
  */
@@ -672,7 +661,6 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
        return -ENXIO;
 }
 #endif
-int nd_blk_region_init(struct nd_region *nd_region);
 int nd_region_activate(struct nd_region *nd_region);
 static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
                unsigned int len)
@@ -687,7 +675,6 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
 
        return false;
 }
-resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
 const uuid_t *nd_dev_to_uuid(struct device *dev);
 bool pmem_should_map_pages(struct device *dev);
 #endif /* __ND_H__ */
diff --git a/drivers/nvdimm/nd_perf.c b/drivers/nvdimm/nd_perf.c
new file mode 100644 (file)
index 0000000..433bbb6
--- /dev/null
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * nd_perf.c: NVDIMM Device Performance Monitoring Unit support
+ *
+ * Perf interface to expose nvdimm performance stats.
+ *
+ * Copyright (C) 2021 IBM Corporation
+ */
+
+#define pr_fmt(fmt) "nvdimm_pmu: " fmt
+
+#include <linux/nd.h>
+#include <linux/platform_device.h>
+
+#define EVENT(_name, _code)     enum{_name = _code}
+
+/*
+ * NVDIMM Events codes.
+ */
+
+/* Controller Reset Count */
+EVENT(CTL_RES_CNT,             0x1);
+/* Controller Reset Elapsed Time */
+EVENT(CTL_RES_TM,              0x2);
+/* Power-on Seconds */
+EVENT(POWERON_SECS,            0x3);
+/* Life Remaining */
+EVENT(MEM_LIFE,                0x4);
+/* Critical Resource Utilization */
+EVENT(CRI_RES_UTIL,            0x5);
+/* Host Load Count */
+EVENT(HOST_L_CNT,              0x6);
+/* Host Store Count */
+EVENT(HOST_S_CNT,              0x7);
+/* Host Store Duration */
+EVENT(HOST_S_DUR,              0x8);
+/* Host Load Duration */
+EVENT(HOST_L_DUR,              0x9);
+/* Media Read Count */
+EVENT(MED_R_CNT,               0xa);
+/* Media Write Count */
+EVENT(MED_W_CNT,               0xb);
+/* Media Read Duration */
+EVENT(MED_R_DUR,               0xc);
+/* Media Write Duration */
+EVENT(MED_W_DUR,               0xd);
+/* Cache Read Hit Count */
+EVENT(CACHE_RH_CNT,            0xe);
+/* Cache Write Hit Count */
+EVENT(CACHE_WH_CNT,            0xf);
+/* Fast Write Count */
+EVENT(FAST_W_CNT,              0x10);
+
+NVDIMM_EVENT_ATTR(ctl_res_cnt,         CTL_RES_CNT);
+NVDIMM_EVENT_ATTR(ctl_res_tm,          CTL_RES_TM);
+NVDIMM_EVENT_ATTR(poweron_secs,                POWERON_SECS);
+NVDIMM_EVENT_ATTR(mem_life,            MEM_LIFE);
+NVDIMM_EVENT_ATTR(cri_res_util,                CRI_RES_UTIL);
+NVDIMM_EVENT_ATTR(host_l_cnt,          HOST_L_CNT);
+NVDIMM_EVENT_ATTR(host_s_cnt,          HOST_S_CNT);
+NVDIMM_EVENT_ATTR(host_s_dur,          HOST_S_DUR);
+NVDIMM_EVENT_ATTR(host_l_dur,          HOST_L_DUR);
+NVDIMM_EVENT_ATTR(med_r_cnt,           MED_R_CNT);
+NVDIMM_EVENT_ATTR(med_w_cnt,           MED_W_CNT);
+NVDIMM_EVENT_ATTR(med_r_dur,           MED_R_DUR);
+NVDIMM_EVENT_ATTR(med_w_dur,           MED_W_DUR);
+NVDIMM_EVENT_ATTR(cache_rh_cnt,                CACHE_RH_CNT);
+NVDIMM_EVENT_ATTR(cache_wh_cnt,                CACHE_WH_CNT);
+NVDIMM_EVENT_ATTR(fast_w_cnt,          FAST_W_CNT);
+
+static struct attribute *nvdimm_events_attr[] = {
+       NVDIMM_EVENT_PTR(CTL_RES_CNT),
+       NVDIMM_EVENT_PTR(CTL_RES_TM),
+       NVDIMM_EVENT_PTR(POWERON_SECS),
+       NVDIMM_EVENT_PTR(MEM_LIFE),
+       NVDIMM_EVENT_PTR(CRI_RES_UTIL),
+       NVDIMM_EVENT_PTR(HOST_L_CNT),
+       NVDIMM_EVENT_PTR(HOST_S_CNT),
+       NVDIMM_EVENT_PTR(HOST_S_DUR),
+       NVDIMM_EVENT_PTR(HOST_L_DUR),
+       NVDIMM_EVENT_PTR(MED_R_CNT),
+       NVDIMM_EVENT_PTR(MED_W_CNT),
+       NVDIMM_EVENT_PTR(MED_R_DUR),
+       NVDIMM_EVENT_PTR(MED_W_DUR),
+       NVDIMM_EVENT_PTR(CACHE_RH_CNT),
+       NVDIMM_EVENT_PTR(CACHE_WH_CNT),
+       NVDIMM_EVENT_PTR(FAST_W_CNT),
+       NULL
+};
+
+static struct attribute_group nvdimm_pmu_events_group = {
+       .name = "events",
+       .attrs = nvdimm_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-4");
+
+static struct attribute *nvdimm_pmu_format_attr[] = {
+       &format_attr_event.attr,
+       NULL,
+};
+
+static struct attribute_group nvdimm_pmu_format_group = {
+       .name = "format",
+       .attrs = nvdimm_pmu_format_attr,
+};
+
+ssize_t nvdimm_events_sysfs_show(struct device *dev,
+                                struct device_attribute *attr, char *page)
+{
+       struct perf_pmu_events_attr *pmu_attr;
+
+       pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+       return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+static ssize_t nvdimm_pmu_cpumask_show(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+       struct pmu *pmu = dev_get_drvdata(dev);
+       struct nvdimm_pmu *nd_pmu;
+
+       nd_pmu = container_of(pmu, struct nvdimm_pmu, pmu);
+
+       return cpumap_print_to_pagebuf(true, buf, cpumask_of(nd_pmu->cpu));
+}
+
+static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+       struct nvdimm_pmu *nd_pmu;
+       u32 target;
+       int nodeid;
+       const struct cpumask *cpumask;
+
+       nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
+
+       /* Clear it, incase given cpu is set in nd_pmu->arch_cpumask */
+       cpumask_test_and_clear_cpu(cpu, &nd_pmu->arch_cpumask);
+
+       /*
+        * If given cpu is not same as current designated cpu for
+        * counter access, just return.
+        */
+       if (cpu != nd_pmu->cpu)
+               return 0;
+
+       /* Check for any active cpu in nd_pmu->arch_cpumask */
+       target = cpumask_any(&nd_pmu->arch_cpumask);
+
+       /*
+        * Incase we don't have any active cpu in nd_pmu->arch_cpumask,
+        * check in given cpu's numa node list.
+        */
+       if (target >= nr_cpu_ids) {
+               nodeid = cpu_to_node(cpu);
+               cpumask = cpumask_of_node(nodeid);
+               target = cpumask_any_but(cpumask, cpu);
+       }
+       nd_pmu->cpu = target;
+
+       /* Migrate nvdimm pmu events to the new target cpu if valid */
+       if (target >= 0 && target < nr_cpu_ids)
+               perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target);
+
+       return 0;
+}
+
+static int nvdimm_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+       struct nvdimm_pmu *nd_pmu;
+
+       nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
+
+       if (nd_pmu->cpu >= nr_cpu_ids)
+               nd_pmu->cpu = cpu;
+
+       return 0;
+}
+
+static int create_cpumask_attr_group(struct nvdimm_pmu *nd_pmu)
+{
+       struct perf_pmu_events_attr *pmu_events_attr;
+       struct attribute **attrs_group;
+       struct attribute_group *nvdimm_pmu_cpumask_group;
+
+       pmu_events_attr = kzalloc(sizeof(*pmu_events_attr), GFP_KERNEL);
+       if (!pmu_events_attr)
+               return -ENOMEM;
+
+       attrs_group = kzalloc(2 * sizeof(struct attribute *), GFP_KERNEL);
+       if (!attrs_group) {
+               kfree(pmu_events_attr);
+               return -ENOMEM;
+       }
+
+       /* Allocate memory for cpumask attribute group */
+       nvdimm_pmu_cpumask_group = kzalloc(sizeof(*nvdimm_pmu_cpumask_group), GFP_KERNEL);
+       if (!nvdimm_pmu_cpumask_group) {
+               kfree(pmu_events_attr);
+               kfree(attrs_group);
+               return -ENOMEM;
+       }
+
+       sysfs_attr_init(&pmu_events_attr->attr.attr);
+       pmu_events_attr->attr.attr.name = "cpumask";
+       pmu_events_attr->attr.attr.mode = 0444;
+       pmu_events_attr->attr.show = nvdimm_pmu_cpumask_show;
+       attrs_group[0] = &pmu_events_attr->attr.attr;
+       attrs_group[1] = NULL;
+
+       nvdimm_pmu_cpumask_group->attrs = attrs_group;
+       nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR] = nvdimm_pmu_cpumask_group;
+       return 0;
+}
+
+static int nvdimm_pmu_cpu_hotplug_init(struct nvdimm_pmu *nd_pmu)
+{
+       int nodeid, rc;
+       const struct cpumask *cpumask;
+
+       /*
+        * Incase of cpu hotplug feature, arch specific code
+        * can provide required cpumask which can be used
+        * to get designatd cpu for counter access.
+        * Check for any active cpu in nd_pmu->arch_cpumask.
+        */
+       if (!cpumask_empty(&nd_pmu->arch_cpumask)) {
+               nd_pmu->cpu = cpumask_any(&nd_pmu->arch_cpumask);
+       } else {
+               /* pick active cpu from the cpumask of device numa node. */
+               nodeid = dev_to_node(nd_pmu->dev);
+               cpumask = cpumask_of_node(nodeid);
+               nd_pmu->cpu = cpumask_any(cpumask);
+       }
+
+       rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/nvdimm:online",
+                                    nvdimm_pmu_cpu_online, nvdimm_pmu_cpu_offline);
+
+       if (rc < 0)
+               return rc;
+
+       nd_pmu->cpuhp_state = rc;
+
+       /* Register the pmu instance for cpu hotplug */
+       rc = cpuhp_state_add_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
+       if (rc) {
+               cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
+               return rc;
+       }
+
+       /* Create cpumask attribute group */
+       rc = create_cpumask_attr_group(nd_pmu);
+       if (rc) {
+               cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
+               cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
+               return rc;
+       }
+
+       return 0;
+}
+
+static void nvdimm_pmu_free_hotplug_memory(struct nvdimm_pmu *nd_pmu)
+{
+       cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
+       cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
+
+       if (nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR])
+               kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]->attrs);
+       kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]);
+}
+
+int register_nvdimm_pmu(struct nvdimm_pmu *nd_pmu, struct platform_device *pdev)
+{
+       int rc;
+
+       if (!nd_pmu || !pdev)
+               return -EINVAL;
+
+       /* event functions like add/del/read/event_init and pmu name should not be NULL */
+       if (WARN_ON_ONCE(!(nd_pmu->pmu.event_init && nd_pmu->pmu.add &&
+                          nd_pmu->pmu.del && nd_pmu->pmu.read && nd_pmu->pmu.name)))
+               return -EINVAL;
+
+       nd_pmu->pmu.attr_groups = kzalloc((NVDIMM_PMU_NULL_ATTR + 1) *
+                                         sizeof(struct attribute_group *), GFP_KERNEL);
+       if (!nd_pmu->pmu.attr_groups)
+               return -ENOMEM;
+
+       /*
+        * Add platform_device->dev pointer to nvdimm_pmu to access
+        * device data in events functions.
+        */
+       nd_pmu->dev = &pdev->dev;
+
+       /* Fill attribute groups for the nvdimm pmu device */
+       nd_pmu->pmu.attr_groups[NVDIMM_PMU_FORMAT_ATTR] = &nvdimm_pmu_format_group;
+       nd_pmu->pmu.attr_groups[NVDIMM_PMU_EVENT_ATTR] = &nvdimm_pmu_events_group;
+       nd_pmu->pmu.attr_groups[NVDIMM_PMU_NULL_ATTR] = NULL;
+
+       /* Fill attribute group for cpumask */
+       rc = nvdimm_pmu_cpu_hotplug_init(nd_pmu);
+       if (rc) {
+               pr_info("cpu hotplug feature failed for device: %s\n", nd_pmu->pmu.name);
+               kfree(nd_pmu->pmu.attr_groups);
+               return rc;
+       }
+
+       rc = perf_pmu_register(&nd_pmu->pmu, nd_pmu->pmu.name, -1);
+       if (rc) {
+               kfree(nd_pmu->pmu.attr_groups);
+               nvdimm_pmu_free_hotplug_memory(nd_pmu);
+               return rc;
+       }
+
+       pr_info("%s NVDIMM performance monitor support registered\n",
+               nd_pmu->pmu.name);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(register_nvdimm_pmu);
+
+void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu)
+{
+       perf_pmu_unregister(&nd_pmu->pmu);
+       nvdimm_pmu_free_hotplug_memory(nd_pmu);
+       kfree(nd_pmu);
+}
+EXPORT_SYMBOL_GPL(unregister_nvdimm_pmu);
index e0c3412..188560b 100644 (file)
@@ -15,6 +15,10 @@ static int nd_region_probe(struct device *dev)
        static unsigned long once;
        struct nd_region_data *ndrd;
        struct nd_region *nd_region = to_nd_region(dev);
+       struct range range = {
+               .start = nd_region->ndr_start,
+               .end = nd_region->ndr_start + nd_region->ndr_size - 1,
+       };
 
        if (nd_region->num_lanes > num_online_cpus()
                        && nd_region->num_lanes < num_possible_cpus()
@@ -30,25 +34,13 @@ static int nd_region_probe(struct device *dev)
        if (rc)
                return rc;
 
-       rc = nd_blk_region_init(nd_region);
-       if (rc)
-               return rc;
-
-       if (is_memory(&nd_region->dev)) {
-               struct range range = {
-                       .start = nd_region->ndr_start,
-                       .end = nd_region->ndr_start + nd_region->ndr_size - 1,
-               };
-
-               if (devm_init_badblocks(dev, &nd_region->bb))
-                       return -ENODEV;
-               nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
-                                                      "badblocks");
-               if (!nd_region->bb_state)
-                       dev_warn(&nd_region->dev,
-                                       "'badblocks' notification disabled\n");
-               nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
-       }
+       if (devm_init_badblocks(dev, &nd_region->bb))
+               return -ENODEV;
+       nd_region->bb_state =
+               sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks");
+       if (!nd_region->bb_state)
+               dev_warn(dev, "'badblocks' notification disabled\n");
+       nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
 
        rc = nd_region_register_namespaces(nd_region, &err);
        if (rc < 0)
@@ -158,4 +150,3 @@ void nd_region_exit(void)
 }
 
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
-MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);
index 9ccf3d6..0cb274c 100644 (file)
@@ -134,10 +134,7 @@ static void nd_region_release(struct device *dev)
        }
        free_percpu(nd_region->lane);
        memregion_free(nd_region->id);
-       if (is_nd_blk(dev))
-               kfree(to_nd_blk_region(dev));
-       else
-               kfree(nd_region);
+       kfree(nd_region);
 }
 
 struct nd_region *to_nd_region(struct device *dev)
@@ -157,33 +154,12 @@ struct device *nd_region_dev(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nd_region_dev);
 
-struct nd_blk_region *to_nd_blk_region(struct device *dev)
-{
-       struct nd_region *nd_region = to_nd_region(dev);
-
-       WARN_ON(!is_nd_blk(dev));
-       return container_of(nd_region, struct nd_blk_region, nd_region);
-}
-EXPORT_SYMBOL_GPL(to_nd_blk_region);
-
 void *nd_region_provider_data(struct nd_region *nd_region)
 {
        return nd_region->provider_data;
 }
 EXPORT_SYMBOL_GPL(nd_region_provider_data);
 
-void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
-{
-       return ndbr->blk_provider_data;
-}
-EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);
-
-void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
-{
-       ndbr->blk_provider_data = data;
-}
-EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
-
 /**
  * nd_region_to_nstype() - region to an integer namespace type
  * @nd_region: region-device to interrogate
@@ -208,8 +184,6 @@ int nd_region_to_nstype(struct nd_region *nd_region)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
-       } else if (is_nd_blk(&nd_region->dev)) {
-               return ND_DEVICE_NAMESPACE_BLK;
        }
 
        return 0;
@@ -332,14 +306,12 @@ static DEVICE_ATTR_RO(set_cookie);
 
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
 {
-       resource_size_t blk_max_overlap = 0, available, overlap;
+       resource_size_t available;
        int i;
 
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
 
- retry:
        available = 0;
-       overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -348,15 +320,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
                if (!ndd)
                        return 0;
 
-               if (is_memory(&nd_region->dev)) {
-                       available += nd_pmem_available_dpa(nd_region,
-                                       nd_mapping, &overlap);
-                       if (overlap > blk_max_overlap) {
-                               blk_max_overlap = overlap;
-                               goto retry;
-                       }
-               } else if (is_nd_blk(&nd_region->dev))
-                       available += nd_blk_available_dpa(nd_region);
+               available += nd_pmem_available_dpa(nd_region, nd_mapping);
        }
 
        return available;
@@ -364,26 +328,17 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
 
 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
 {
-       resource_size_t available = 0;
+       resource_size_t avail = 0;
        int i;
 
-       if (is_memory(&nd_region->dev))
-               available = PHYS_ADDR_MAX;
-
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 
-               if (is_memory(&nd_region->dev))
-                       available = min(available,
-                                       nd_pmem_max_contiguous_dpa(nd_region,
-                                                                  nd_mapping));
-               else if (is_nd_blk(&nd_region->dev))
-                       available += nd_blk_available_dpa(nd_region);
+               avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa(
+                                                   nd_region, nd_mapping));
        }
-       if (is_memory(&nd_region->dev))
-               return available * nd_region->ndr_mappings;
-       return available;
+       return avail * nd_region->ndr_mappings;
 }
 
 static ssize_t available_size_show(struct device *dev,
@@ -693,9 +648,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
                        && a != &dev_attr_available_size.attr)
                return a->mode;
 
-       if ((type == ND_DEVICE_NAMESPACE_PMEM
-                               || type == ND_DEVICE_NAMESPACE_BLK)
-                       && a == &dev_attr_available_size.attr)
+       if (type == ND_DEVICE_NAMESPACE_PMEM &&
+           a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_memory(dev) && nd_set)
                return a->mode;
@@ -828,12 +782,6 @@ static const struct attribute_group *nd_region_attribute_groups[] = {
        NULL,
 };
 
-static const struct device_type nd_blk_device_type = {
-       .name = "nd_blk",
-       .release = nd_region_release,
-       .groups = nd_region_attribute_groups,
-};
-
 static const struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
@@ -851,11 +799,6 @@ bool is_nd_pmem(struct device *dev)
        return dev ? dev->type == &nd_pmem_device_type : false;
 }
 
-bool is_nd_blk(struct device *dev)
-{
-       return dev ? dev->type == &nd_blk_device_type : false;
-}
-
 bool is_nd_volatile(struct device *dev)
 {
        return dev ? dev->type == &nd_volatile_device_type : false;
@@ -929,22 +872,6 @@ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
        nvdimm_bus_unlock(dev);
 }
 
-int nd_blk_region_init(struct nd_region *nd_region)
-{
-       struct device *dev = &nd_region->dev;
-       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-
-       if (!is_nd_blk(dev))
-               return 0;
-
-       if (nd_region->ndr_mappings < 1) {
-               dev_dbg(dev, "invalid BLK region\n");
-               return -ENXIO;
-       }
-
-       return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
-}
-
 /**
  * nd_region_acquire_lane - allocate and lock a lane
  * @nd_region: region id and number of lanes possible
@@ -1007,23 +934,12 @@ EXPORT_SYMBOL(nd_region_release_lane);
 static unsigned long default_align(struct nd_region *nd_region)
 {
        unsigned long align;
-       int i, mappings;
        u32 remainder;
+       int mappings;
 
-       if (is_nd_blk(&nd_region->dev))
+       align = MEMREMAP_COMPAT_ALIGN_MAX;
+       if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
                align = PAGE_SIZE;
-       else
-               align = MEMREMAP_COMPAT_ALIGN_MAX;
-
-       for (i = 0; i < nd_region->ndr_mappings; i++) {
-               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-               struct nvdimm *nvdimm = nd_mapping->nvdimm;
-
-               if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
-                       align = MEMREMAP_COMPAT_ALIGN_MAX;
-                       break;
-               }
-       }
 
        mappings = max_t(u16, 1, nd_region->ndr_mappings);
        div_u64_rem(align, mappings, &remainder);
@@ -1039,7 +955,6 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
 {
        struct nd_region *nd_region;
        struct device *dev;
-       void *region_buf;
        unsigned int i;
        int ro = 0;
 
@@ -1057,36 +972,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                if (test_bit(NDD_UNARMED, &nvdimm->flags))
                        ro = 1;
 
-               if (test_bit(NDD_NOBLK, &nvdimm->flags)
-                               && dev_type == &nd_blk_device_type) {
-                       dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
-                                       caller, dev_name(&nvdimm->dev), i);
-                       return NULL;
-               }
        }
 
-       if (dev_type == &nd_blk_device_type) {
-               struct nd_blk_region_desc *ndbr_desc;
-               struct nd_blk_region *ndbr;
-
-               ndbr_desc = to_blk_region_desc(ndr_desc);
-               ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
-                               * ndr_desc->num_mappings,
-                               GFP_KERNEL);
-               if (ndbr) {
-                       nd_region = &ndbr->nd_region;
-                       ndbr->enable = ndbr_desc->enable;
-                       ndbr->do_io = ndbr_desc->do_io;
-               }
-               region_buf = ndbr;
-       } else {
-               nd_region = kzalloc(struct_size(nd_region, mapping,
-                                               ndr_desc->num_mappings),
-                                   GFP_KERNEL);
-               region_buf = nd_region;
-       }
+       nd_region =
+               kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings),
+                       GFP_KERNEL);
 
-       if (!region_buf)
+       if (!nd_region)
                return NULL;
        nd_region->id = memregion_alloc(GFP_KERNEL);
        if (nd_region->id < 0)
@@ -1150,7 +1042,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
  err_percpu:
        memregion_free(nd_region->id);
  err_id:
-       kfree(region_buf);
+       kfree(nd_region);
        return NULL;
 }
 
@@ -1163,17 +1055,6 @@ struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
 }
 EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
 
-struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
-               struct nd_region_desc *ndr_desc)
-{
-       if (ndr_desc->num_mappings > 1)
-               return NULL;
-       ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
-       return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
-                       __func__);
-}
-EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);
-
 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
 {
@@ -1198,7 +1079,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
 }
 /**
  * nvdimm_flush - flush any posted write queues between the cpu and pmem media
- * @nd_region: blk or interleaved pmem region
+ * @nd_region: interleaved pmem region
  */
 int generic_nvdimm_flush(struct nd_region *nd_region)
 {
@@ -1231,7 +1112,7 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
 
 /**
  * nvdimm_has_flush - determine write flushing requirements
- * @nd_region: blk or interleaved pmem region
+ * @nd_region: interleaved pmem region
  *
  * Returns 1 if writes require flushing
  * Returns 0 if writes do not require flushing
index 952a925..e330362 100644 (file)
@@ -142,9 +142,8 @@ struct dino_device
 {
        struct pci_hba_data     hba;    /* 'C' inheritance - must be first */
        spinlock_t              dinosaur_pen;
-       unsigned long           txn_addr; /* EIR addr to generate interrupt */ 
-       u32                     txn_data; /* EIR data assign to each dino */ 
        u32                     imr;      /* IRQ's which are enabled */ 
+       struct gsc_irq          gsc_irq;
        int                     global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */
 #ifdef DINO_DEBUG
        unsigned int            dino_irr0; /* save most recent IRQ line stat */
@@ -339,14 +338,43 @@ static void dino_unmask_irq(struct irq_data *d)
        if (tmp & DINO_MASK_IRQ(local_irq)) {
                DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
                                __func__, tmp);
-               gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
+               gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr);
        }
 }
 
+#ifdef CONFIG_SMP
+static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+                               bool force)
+{
+       struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+       struct cpumask tmask;
+       int cpu_irq;
+       u32 eim;
+
+       if (!cpumask_and(&tmask, dest, cpu_online_mask))
+               return -EINVAL;
+
+       cpu_irq = cpu_check_affinity(d, &tmask);
+       if (cpu_irq < 0)
+               return cpu_irq;
+
+       dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
+       eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
+       __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
+
+       irq_data_update_effective_affinity(d, &tmask);
+
+       return IRQ_SET_MASK_OK;
+}
+#endif
+
 static struct irq_chip dino_interrupt_type = {
        .name           = "GSC-PCI",
        .irq_unmask     = dino_unmask_irq,
        .irq_mask       = dino_mask_irq,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = dino_set_affinity_irq,
+#endif
 };
 
 
@@ -806,7 +834,6 @@ static int __init dino_common_init(struct parisc_device *dev,
 {
        int status;
        u32 eim;
-       struct gsc_irq gsc_irq;
        struct resource *res;
 
        pcibios_register_hba(&dino_dev->hba);
@@ -821,10 +848,8 @@ static int __init dino_common_init(struct parisc_device *dev,
        **   still only has 11 IRQ input lines - just map some of them
        **   to a different processor.
        */
-       dev->irq = gsc_alloc_irq(&gsc_irq);
-       dino_dev->txn_addr = gsc_irq.txn_addr;
-       dino_dev->txn_data = gsc_irq.txn_data;
-       eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+       dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq);
+       eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
 
        /* 
        ** Dino needs a PA "IRQ" to get a processor's attention.
index ed9371a..ec175ae 100644 (file)
@@ -135,10 +135,41 @@ static void gsc_asic_unmask_irq(struct irq_data *d)
         */
 }
 
+#ifdef CONFIG_SMP
+static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+                               bool force)
+{
+       struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d);
+       struct cpumask tmask;
+       int cpu_irq;
+
+       if (!cpumask_and(&tmask, dest, cpu_online_mask))
+               return -EINVAL;
+
+       cpu_irq = cpu_check_affinity(d, &tmask);
+       if (cpu_irq < 0)
+               return cpu_irq;
+
+       gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
+       gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;
+
+       /* switch IRQ's for devices below LASI/WAX to other CPU */
+       gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
+
+       irq_data_update_effective_affinity(d, &tmask);
+
+       return IRQ_SET_MASK_OK;
+}
+#endif
+
+
 static struct irq_chip gsc_asic_interrupt_type = {
        .name           =       "GSC-ASIC",
        .irq_unmask     =       gsc_asic_unmask_irq,
        .irq_mask       =       gsc_asic_mask_irq,
+#ifdef CONFIG_SMP
+       .irq_set_affinity =     gsc_set_affinity_irq,
+#endif
 };
 
 int gsc_assign_irq(struct irq_chip *type, void *data)
index 86abad3..73cbd0b 100644 (file)
@@ -31,6 +31,7 @@ struct gsc_asic {
        int version;
        int type;
        int eim;
+       struct gsc_irq gsc_irq;
        int global_irq[32];
 };
 
index 4e4fd12..6ef621a 100644 (file)
@@ -163,7 +163,6 @@ static int __init lasi_init_chip(struct parisc_device *dev)
 {
        extern void (*chassis_power_off)(void);
        struct gsc_asic *lasi;
-       struct gsc_irq gsc_irq;
        int ret;
 
        lasi = kzalloc(sizeof(*lasi), GFP_KERNEL);
@@ -185,7 +184,7 @@ static int __init lasi_init_chip(struct parisc_device *dev)
        lasi_init_irq(lasi);
 
        /* the IRQ lasi should use */
-       dev->irq = gsc_alloc_irq(&gsc_irq);
+       dev->irq = gsc_alloc_irq(&lasi->gsc_irq);
        if (dev->irq < 0) {
                printk(KERN_ERR "%s(): cannot get GSC irq\n",
                                __func__);
@@ -193,9 +192,9 @@ static int __init lasi_init_chip(struct parisc_device *dev)
                return -EBUSY;
        }
 
-       lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+       lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data;
 
-       ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
+       ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
        if (ret < 0) {
                kfree(lasi);
                return ret;
index 5b6df15..73a2b01 100644 (file)
@@ -68,7 +68,6 @@ static int __init wax_init_chip(struct parisc_device *dev)
 {
        struct gsc_asic *wax;
        struct parisc_device *parent;
-       struct gsc_irq gsc_irq;
        int ret;
 
        wax = kzalloc(sizeof(*wax), GFP_KERNEL);
@@ -85,7 +84,7 @@ static int __init wax_init_chip(struct parisc_device *dev)
        wax_init_irq(wax);
 
        /* the IRQ wax should use */
-       dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
+       dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ);
        if (dev->irq < 0) {
                printk(KERN_ERR "%s(): cannot get GSC irq\n",
                                __func__);
@@ -93,9 +92,9 @@ static int __init wax_init_chip(struct parisc_device *dev)
                return -EBUSY;
        }
 
-       wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+       wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data;
 
-       ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
+       ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
        if (ret < 0) {
                kfree(wax);
                return ret;
index c3d0fcf..0feaa4b 100644 (file)
@@ -1214,10 +1214,9 @@ ptp_ocp_nvmem_device_get(struct ptp_ocp *bp, const void * const tag)
 static inline void
 ptp_ocp_nvmem_device_put(struct nvmem_device **nvmemp)
 {
-       if (*nvmemp != NULL) {
+       if (!IS_ERR_OR_NULL(*nvmemp))
                nvmem_device_put(*nvmemp);
-               *nvmemp = NULL;
-       }
+       *nvmemp = NULL;
 }
 
 static void
@@ -1241,13 +1240,15 @@ ptp_ocp_read_eeprom(struct ptp_ocp *bp)
                }
                if (!nvmem) {
                        nvmem = ptp_ocp_nvmem_device_get(bp, tag);
-                       if (!nvmem)
-                               goto out;
+                       if (IS_ERR(nvmem)) {
+                               ret = PTR_ERR(nvmem);
+                               goto fail;
+                       }
                }
                ret = nvmem_device_read(nvmem, map->off, map->len,
                                        BP_MAP_ENTRY_ADDR(bp, map));
                if (ret != map->len)
-                       goto read_fail;
+                       goto fail;
        }
 
        bp->has_eeprom_data = true;
@@ -1256,7 +1257,7 @@ out:
        ptp_ocp_nvmem_device_put(&nvmem);
        return;
 
-read_fail:
+fail:
        dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", ret);
        goto out;
 }
index 98b34ea..8e00a42 100644 (file)
@@ -271,7 +271,7 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
                              bool disable_clk)
 {
        struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
-       unsigned long timeout = jiffies + 2 * HZ;
+       unsigned long timeout;
 
        atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm);
 
index 64148f5..f171169 100644 (file)
@@ -109,10 +109,10 @@ static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
 }
 
 static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
-                           int duty_ns, int period_ns)
+                           u64 duty_ns, u64 period_ns)
 {
        struct kona_pwmc *kp = to_kona_pwmc(chip);
-       u64 val, div, rate;
+       u64 div, rate;
        unsigned long prescale = PRESCALE_MIN, pc, dc;
        unsigned int value, chan = pwm->hwpwm;
 
@@ -132,10 +132,8 @@ static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
        while (1) {
                div = 1000000000;
                div *= 1 + prescale;
-               val = rate * period_ns;
-               pc = div64_u64(val, div);
-               val = rate * duty_ns;
-               dc = div64_u64(val, div);
+               pc = mul_u64_u64_div_u64(rate, period_ns, div);
+               dc = mul_u64_u64_div_u64(rate, duty_ns, div);
 
                /* If duty_ns or period_ns are not achievable then return */
                if (pc < PERIOD_COUNT_MIN)
@@ -150,25 +148,18 @@ static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
                        return -EINVAL;
        }
 
-       /*
-        * Don't apply settings if disabled. The period and duty cycle are
-        * always calculated above to ensure the new values are
-        * validated immediately instead of on enable.
-        */
-       if (pwm_is_enabled(pwm)) {
-               kona_pwmc_prepare_for_settings(kp, chan);
+       kona_pwmc_prepare_for_settings(kp, chan);
 
-               value = readl(kp->base + PRESCALE_OFFSET);
-               value &= ~PRESCALE_MASK(chan);
-               value |= prescale << PRESCALE_SHIFT(chan);
-               writel(value, kp->base + PRESCALE_OFFSET);
+       value = readl(kp->base + PRESCALE_OFFSET);
+       value &= ~PRESCALE_MASK(chan);
+       value |= prescale << PRESCALE_SHIFT(chan);
+       writel(value, kp->base + PRESCALE_OFFSET);
 
-               writel(pc, kp->base + PERIOD_COUNT_OFFSET(chan));
+       writel(pc, kp->base + PERIOD_COUNT_OFFSET(chan));
 
-               writel(dc, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
+       writel(dc, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
 
-               kona_pwmc_apply_settings(kp, chan);
-       }
+       kona_pwmc_apply_settings(kp, chan);
 
        return 0;
 }
@@ -216,13 +207,6 @@ static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
                return ret;
        }
 
-       ret = kona_pwmc_config(chip, pwm, pwm_get_duty_cycle(pwm),
-                              pwm_get_period(pwm));
-       if (ret < 0) {
-               clk_disable_unprepare(kp->clk);
-               return ret;
-       }
-
        return 0;
 }
 
@@ -248,11 +232,53 @@ static void kona_pwmc_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        clk_disable_unprepare(kp->clk);
 }
 
+static int kona_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                          const struct pwm_state *state)
+{
+       int err;
+       struct kona_pwmc *kp = to_kona_pwmc(chip);
+       bool enabled = pwm->state.enabled;
+
+       if (state->polarity != pwm->state.polarity) {
+               if (enabled) {
+                       kona_pwmc_disable(chip, pwm);
+                       enabled = false;
+               }
+
+               err = kona_pwmc_set_polarity(chip, pwm, state->polarity);
+               if (err)
+                       return err;
+
+               pwm->state.polarity = state->polarity;
+       }
+
+       if (!state->enabled) {
+               if (enabled)
+                       kona_pwmc_disable(chip, pwm);
+               return 0;
+       } else if (!enabled) {
+               /*
+                * This is a bit special here, usually the PWM should only be
+                * enabled when duty and period are setup. But before this
+                * driver was converted to .apply it was done the other way
+                * around and so this behaviour was kept even though this might
+                * result in a glitch. This might be improvable by someone with
+                * hardware and/or documentation.
+                */
+               err = kona_pwmc_enable(chip, pwm);
+               if (err)
+                       return err;
+       }
+
+       err = kona_pwmc_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       if (err && !pwm->state.enabled)
+               clk_disable_unprepare(kp->clk);
+
+       return err;
+}
+
 static const struct pwm_ops kona_pwm_ops = {
-       .config = kona_pwmc_config,
-       .set_polarity = kona_pwmc_set_polarity,
-       .enable = kona_pwmc_enable,
-       .disable = kona_pwmc_disable,
+       .apply = kona_pwmc_apply,
        .owner = THIS_MODULE,
 };
 
index 3b529f8..3db3f96 100644 (file)
@@ -53,7 +53,6 @@
 
 struct brcmstb_pwm {
        void __iomem *base;
-       spinlock_t lock;
        struct clk *clk;
        struct pwm_chip chip;
 };
@@ -95,7 +94,7 @@ static inline struct brcmstb_pwm *to_brcmstb_pwm(struct pwm_chip *chip)
  * "on" time, so this translates directly into our HW programming here.
  */
 static int brcmstb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-                             int duty_ns, int period_ns)
+                             u64 duty_ns, u64 period_ns)
 {
        struct brcmstb_pwm *p = to_brcmstb_pwm(chip);
        unsigned long pc, dc, cword = CONST_VAR_F_MAX;
@@ -114,22 +113,17 @@ static int brcmstb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        }
 
        while (1) {
-               u64 rate, tmp;
+               u64 rate;
 
                /*
                 * Calculate the base rate from base frequency and current
                 * cword
                 */
                rate = (u64)clk_get_rate(p->clk) * (u64)cword;
-               do_div(rate, 1 << CWORD_BIT_SIZE);
+               rate >>= CWORD_BIT_SIZE;
 
-               tmp = period_ns * rate;
-               do_div(tmp, NSEC_PER_SEC);
-               pc = tmp;
-
-               tmp = (duty_ns + 1) * rate;
-               do_div(tmp, NSEC_PER_SEC);
-               dc = tmp;
+               pc = mul_u64_u64_div_u64(period_ns, rate, NSEC_PER_SEC);
+               dc = mul_u64_u64_div_u64(duty_ns + 1, rate, NSEC_PER_SEC);
 
                /*
                 * We can be called with separate duty and period updates,
@@ -164,7 +158,6 @@ done:
         * generator output a base frequency for the constant frequency
         * generator to derive from.
         */
-       spin_lock(&p->lock);
        brcmstb_pwm_writel(p, cword >> 8, PWM_CWORD_MSB(channel));
        brcmstb_pwm_writel(p, cword & 0xff, PWM_CWORD_LSB(channel));
 
@@ -176,7 +169,6 @@ done:
        /* Configure on and period value */
        brcmstb_pwm_writel(p, pc, PWM_PERIOD(channel));
        brcmstb_pwm_writel(p, dc, PWM_ON(channel));
-       spin_unlock(&p->lock);
 
        return 0;
 }
@@ -187,7 +179,6 @@ static inline void brcmstb_pwm_enable_set(struct brcmstb_pwm *p,
        unsigned int shift = channel * CTRL_CHAN_OFFS;
        u32 value;
 
-       spin_lock(&p->lock);
        value = brcmstb_pwm_readl(p, PWM_CTRL);
 
        if (enable) {
@@ -199,29 +190,36 @@ static inline void brcmstb_pwm_enable_set(struct brcmstb_pwm *p,
        }
 
        brcmstb_pwm_writel(p, value, PWM_CTRL);
-       spin_unlock(&p->lock);
 }
 
-static int brcmstb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int brcmstb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                            const struct pwm_state *state)
 {
        struct brcmstb_pwm *p = to_brcmstb_pwm(chip);
+       int err;
 
-       brcmstb_pwm_enable_set(p, pwm->hwpwm, true);
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
 
-       return 0;
-}
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       brcmstb_pwm_enable_set(p, pwm->hwpwm, false);
 
-static void brcmstb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct brcmstb_pwm *p = to_brcmstb_pwm(chip);
+               return 0;
+       }
+
+       err = brcmstb_pwm_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               brcmstb_pwm_enable_set(p, pwm->hwpwm, true);
 
-       brcmstb_pwm_enable_set(p, pwm->hwpwm, false);
+       return 0;
 }
 
 static const struct pwm_ops brcmstb_pwm_ops = {
-       .config = brcmstb_pwm_config,
-       .enable = brcmstb_pwm_enable,
-       .disable = brcmstb_pwm_disable,
+       .apply = brcmstb_pwm_apply,
        .owner = THIS_MODULE,
 };
 
@@ -240,8 +238,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
        if (!p)
                return -ENOMEM;
 
-       spin_lock_init(&p->lock);
-
        p->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(p->clk)) {
                dev_err(&pdev->dev, "failed to obtain clock\n");
index 5996049..0fccf06 100644 (file)
@@ -77,16 +77,15 @@ static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
        return container_of(chip, struct img_pwm_chip, chip);
 }
 
-static inline void img_pwm_writel(struct img_pwm_chip *chip,
+static inline void img_pwm_writel(struct img_pwm_chip *imgchip,
                                  u32 reg, u32 val)
 {
-       writel(val, chip->base + reg);
+       writel(val, imgchip->base + reg);
 }
 
-static inline u32 img_pwm_readl(struct img_pwm_chip *chip,
-                                        u32 reg)
+static inline u32 img_pwm_readl(struct img_pwm_chip *imgchip, u32 reg)
 {
-       return readl(chip->base + reg);
+       return readl(imgchip->base + reg);
 }
 
 static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -94,17 +93,17 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 {
        u32 val, div, duty, timebase;
        unsigned long mul, output_clk_hz, input_clk_hz;
-       struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
-       unsigned int max_timebase = pwm_chip->data->max_timebase;
+       struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
+       unsigned int max_timebase = imgchip->data->max_timebase;
        int ret;
 
-       if (period_ns < pwm_chip->min_period_ns ||
-           period_ns > pwm_chip->max_period_ns) {
+       if (period_ns < imgchip->min_period_ns ||
+           period_ns > imgchip->max_period_ns) {
                dev_err(chip->dev, "configured period not in range\n");
                return -ERANGE;
        }
 
-       input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
+       input_clk_hz = clk_get_rate(imgchip->pwm_clk);
        output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
 
        mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
@@ -132,15 +131,15 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        if (ret < 0)
                return ret;
 
-       val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+       val = img_pwm_readl(imgchip, PWM_CTRL_CFG);
        val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
        val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
                PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm);
-       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+       img_pwm_writel(imgchip, PWM_CTRL_CFG, val);
 
        val = (duty << PWM_CH_CFG_DUTY_SHIFT) |
              (timebase << PWM_CH_CFG_TMBASE_SHIFT);
-       img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
+       img_pwm_writel(imgchip, PWM_CH_CFG(pwm->hwpwm), val);
 
        pm_runtime_mark_last_busy(chip->dev);
        pm_runtime_put_autosuspend(chip->dev);
@@ -151,18 +150,18 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        u32 val;
-       struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+       struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
        int ret;
 
        ret = pm_runtime_resume_and_get(chip->dev);
        if (ret < 0)
                return ret;
 
-       val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+       val = img_pwm_readl(imgchip, PWM_CTRL_CFG);
        val |= BIT(pwm->hwpwm);
-       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+       img_pwm_writel(imgchip, PWM_CTRL_CFG, val);
 
-       regmap_update_bits(pwm_chip->periph_regs, PERIP_PWM_PDM_CONTROL,
+       regmap_update_bits(imgchip->periph_regs, PERIP_PWM_PDM_CONTROL,
                           PERIP_PWM_PDM_CONTROL_CH_MASK <<
                           PERIP_PWM_PDM_CONTROL_CH_SHIFT(pwm->hwpwm), 0);
 
@@ -172,11 +171,11 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        u32 val;
-       struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+       struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
 
-       val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+       val = img_pwm_readl(imgchip, PWM_CTRL_CFG);
        val &= ~BIT(pwm->hwpwm);
-       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+       img_pwm_writel(imgchip, PWM_CTRL_CFG, val);
 
        pm_runtime_mark_last_busy(chip->dev);
        pm_runtime_put_autosuspend(chip->dev);
@@ -227,29 +226,29 @@ MODULE_DEVICE_TABLE(of, img_pwm_of_match);
 
 static int img_pwm_runtime_suspend(struct device *dev)
 {
-       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+       struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
 
-       clk_disable_unprepare(pwm_chip->pwm_clk);
-       clk_disable_unprepare(pwm_chip->sys_clk);
+       clk_disable_unprepare(imgchip->pwm_clk);
+       clk_disable_unprepare(imgchip->sys_clk);
 
        return 0;
 }
 
 static int img_pwm_runtime_resume(struct device *dev)
 {
-       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+       struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
        int ret;
 
-       ret = clk_prepare_enable(pwm_chip->sys_clk);
+       ret = clk_prepare_enable(imgchip->sys_clk);
        if (ret < 0) {
                dev_err(dev, "could not prepare or enable sys clock\n");
                return ret;
        }
 
-       ret = clk_prepare_enable(pwm_chip->pwm_clk);
+       ret = clk_prepare_enable(imgchip->pwm_clk);
        if (ret < 0) {
                dev_err(dev, "could not prepare or enable pwm clock\n");
-               clk_disable_unprepare(pwm_chip->sys_clk);
+               clk_disable_unprepare(imgchip->sys_clk);
                return ret;
        }
 
@@ -261,42 +260,42 @@ static int img_pwm_probe(struct platform_device *pdev)
        int ret;
        u64 val;
        unsigned long clk_rate;
-       struct img_pwm_chip *pwm;
+       struct img_pwm_chip *imgchip;
        const struct of_device_id *of_dev_id;
 
-       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
-       if (!pwm)
+       imgchip = devm_kzalloc(&pdev->dev, sizeof(*imgchip), GFP_KERNEL);
+       if (!imgchip)
                return -ENOMEM;
 
-       pwm->dev = &pdev->dev;
+       imgchip->dev = &pdev->dev;
 
-       pwm->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(pwm->base))
-               return PTR_ERR(pwm->base);
+       imgchip->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(imgchip->base))
+               return PTR_ERR(imgchip->base);
 
        of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
        if (!of_dev_id)
                return -ENODEV;
-       pwm->data = of_dev_id->data;
+       imgchip->data = of_dev_id->data;
 
-       pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-                                                          "img,cr-periph");
-       if (IS_ERR(pwm->periph_regs))
-               return PTR_ERR(pwm->periph_regs);
+       imgchip->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                              "img,cr-periph");
+       if (IS_ERR(imgchip->periph_regs))
+               return PTR_ERR(imgchip->periph_regs);
 
-       pwm->sys_clk = devm_clk_get(&pdev->dev, "sys");
-       if (IS_ERR(pwm->sys_clk)) {
+       imgchip->sys_clk = devm_clk_get(&pdev->dev, "sys");
+       if (IS_ERR(imgchip->sys_clk)) {
                dev_err(&pdev->dev, "failed to get system clock\n");
-               return PTR_ERR(pwm->sys_clk);
+               return PTR_ERR(imgchip->sys_clk);
        }
 
-       pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
-       if (IS_ERR(pwm->pwm_clk)) {
-               dev_err(&pdev->dev, "failed to get pwm clock\n");
-               return PTR_ERR(pwm->pwm_clk);
+       imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
+       if (IS_ERR(imgchip->pwm_clk)) {
+               dev_err(&pdev->dev, "failed to get imgchip clock\n");
+               return PTR_ERR(imgchip->pwm_clk);
        }
 
-       platform_set_drvdata(pdev, pwm);
+       platform_set_drvdata(pdev, imgchip);
 
        pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
@@ -307,27 +306,27 @@ static int img_pwm_probe(struct platform_device *pdev)
                        goto err_pm_disable;
        }
 
-       clk_rate = clk_get_rate(pwm->pwm_clk);
+       clk_rate = clk_get_rate(imgchip->pwm_clk);
        if (!clk_rate) {
-               dev_err(&pdev->dev, "pwm clock has no frequency\n");
+               dev_err(&pdev->dev, "imgchip clock has no frequency\n");
                ret = -EINVAL;
                goto err_suspend;
        }
 
        /* The maximum input clock divider is 512 */
-       val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
+       val = (u64)NSEC_PER_SEC * 512 * imgchip->data->max_timebase;
        do_div(val, clk_rate);
-       pwm->max_period_ns = val;
+       imgchip->max_period_ns = val;
 
        val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
        do_div(val, clk_rate);
-       pwm->min_period_ns = val;
+       imgchip->min_period_ns = val;
 
-       pwm->chip.dev = &pdev->dev;
-       pwm->chip.ops = &img_pwm_ops;
-       pwm->chip.npwm = IMG_PWM_NPWM;
+       imgchip->chip.dev = &pdev->dev;
+       imgchip->chip.ops = &img_pwm_ops;
+       imgchip->chip.npwm = IMG_PWM_NPWM;
 
-       ret = pwmchip_add(&pwm->chip);
+       ret = pwmchip_add(&imgchip->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
                goto err_suspend;
@@ -346,13 +345,13 @@ err_pm_disable:
 
 static int img_pwm_remove(struct platform_device *pdev)
 {
-       struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
+       struct img_pwm_chip *imgchip = platform_get_drvdata(pdev);
 
        pm_runtime_disable(&pdev->dev);
        if (!pm_runtime_status_suspended(&pdev->dev))
                img_pwm_runtime_suspend(&pdev->dev);
 
-       pwmchip_remove(&pwm_chip->chip);
+       pwmchip_remove(&imgchip->chip);
 
        return 0;
 }
@@ -360,7 +359,7 @@ static int img_pwm_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int img_pwm_suspend(struct device *dev)
 {
-       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+       struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
        int i, ret;
 
        if (pm_runtime_status_suspended(dev)) {
@@ -369,11 +368,11 @@ static int img_pwm_suspend(struct device *dev)
                        return ret;
        }
 
-       for (i = 0; i < pwm_chip->chip.npwm; i++)
-               pwm_chip->suspend_ch_cfg[i] = img_pwm_readl(pwm_chip,
-                                                           PWM_CH_CFG(i));
+       for (i = 0; i < imgchip->chip.npwm; i++)
+               imgchip->suspend_ch_cfg[i] = img_pwm_readl(imgchip,
+                                                          PWM_CH_CFG(i));
 
-       pwm_chip->suspend_ctrl_cfg = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+       imgchip->suspend_ctrl_cfg = img_pwm_readl(imgchip, PWM_CTRL_CFG);
 
        img_pwm_runtime_suspend(dev);
 
@@ -382,7 +381,7 @@ static int img_pwm_suspend(struct device *dev)
 
 static int img_pwm_resume(struct device *dev)
 {
-       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+       struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
        int ret;
        int i;
 
@@ -390,15 +389,15 @@ static int img_pwm_resume(struct device *dev)
        if (ret)
                return ret;
 
-       for (i = 0; i < pwm_chip->chip.npwm; i++)
-               img_pwm_writel(pwm_chip, PWM_CH_CFG(i),
-                              pwm_chip->suspend_ch_cfg[i]);
+       for (i = 0; i < imgchip->chip.npwm; i++)
+               img_pwm_writel(imgchip, PWM_CH_CFG(i),
+                              imgchip->suspend_ch_cfg[i]);
 
-       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, pwm_chip->suspend_ctrl_cfg);
+       img_pwm_writel(imgchip, PWM_CTRL_CFG, imgchip->suspend_ctrl_cfg);
 
-       for (i = 0; i < pwm_chip->chip.npwm; i++)
-               if (pwm_chip->suspend_ctrl_cfg & BIT(i))
-                       regmap_update_bits(pwm_chip->periph_regs,
+       for (i = 0; i < imgchip->chip.npwm; i++)
+               if (imgchip->suspend_ctrl_cfg & BIT(i))
+                       regmap_update_bits(imgchip->periph_regs,
                                           PERIP_PWM_PDM_CONTROL,
                                           PERIP_PWM_PDM_CONTROL_CH_MASK <<
                                           PERIP_PWM_PDM_CONTROL_CH_SHIFT(i),
index bcd8494..1f2eb1c 100644 (file)
@@ -61,7 +61,7 @@ static void pwm_imx1_clk_disable_unprepare(struct pwm_chip *chip)
 }
 
 static int pwm_imx1_config(struct pwm_chip *chip,
-                          struct pwm_device *pwm, int duty_ns, int period_ns)
+                          struct pwm_device *pwm, u64 duty_ns, u64 period_ns)
 {
        struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
        u32 max, p;
@@ -84,7 +84,7 @@ static int pwm_imx1_config(struct pwm_chip *chip,
         * (/2 .. /16).
         */
        max = readl(imx->mmio_base + MX1_PWMP);
-       p = max * duty_ns / period_ns;
+       p = mul_u64_u64_div_u64(max, duty_ns, period_ns);
 
        writel(max - p, imx->mmio_base + MX1_PWMS);
 
@@ -120,10 +120,33 @@ static void pwm_imx1_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        pwm_imx1_clk_disable_unprepare(chip);
 }
 
+static int pwm_imx1_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                         const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       pwm_imx1_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = pwm_imx1_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               return pwm_imx1_enable(chip, pwm);
+
+       return 0;
+}
+
 static const struct pwm_ops pwm_imx1_ops = {
-       .enable = pwm_imx1_enable,
-       .disable = pwm_imx1_disable,
-       .config = pwm_imx1_config,
+       .apply = pwm_imx1_apply,
        .owner = THIS_MODULE,
 };
 
index 23dc1fb..a5fdf97 100644 (file)
@@ -256,10 +256,15 @@ static const struct soc_info __maybe_unused jz4725b_soc_info = {
        .num_pwms = 6,
 };
 
+static const struct soc_info __maybe_unused x1000_soc_info = {
+       .num_pwms = 5,
+};
+
 #ifdef CONFIG_OF
 static const struct of_device_id jz4740_pwm_dt_ids[] = {
        { .compatible = "ingenic,jz4740-pwm", .data = &jz4740_soc_info },
        { .compatible = "ingenic,jz4725b-pwm", .data = &jz4725b_soc_info },
+       { .compatible = "ingenic,x1000-pwm", .data = &x1000_soc_info },
        {},
 };
 MODULE_DEVICE_TABLE(of, jz4740_pwm_dt_ids);
index 8e461f3..b909096 100644 (file)
@@ -76,6 +76,8 @@
 #define LPC18XX_PWM_EVENT_PERIOD       0
 #define LPC18XX_PWM_EVENT_MAX          16
 
+#define LPC18XX_NUM_PWMS               16
+
 /* SCT conflict resolution */
 enum lpc18xx_pwm_res_action {
        LPC18XX_PWM_RES_NONE,
@@ -101,6 +103,7 @@ struct lpc18xx_pwm_chip {
        unsigned long event_map;
        struct mutex res_lock;
        struct mutex period_lock;
+       struct lpc18xx_pwm_data channeldata[LPC18XX_NUM_PWMS];
 };
 
 static inline struct lpc18xx_pwm_chip *
@@ -163,7 +166,7 @@ static void lpc18xx_pwm_config_duty(struct pwm_chip *chip,
                                    struct pwm_device *pwm, int duty_ns)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
-       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
        u64 val;
 
        val = (u64)duty_ns * lpc18xx_pwm->clk_rate;
@@ -233,7 +236,7 @@ static int lpc18xx_pwm_set_polarity(struct pwm_chip *chip,
 static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
-       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
        enum lpc18xx_pwm_res_action res_action;
        unsigned int set_event, clear_event;
 
@@ -268,7 +271,7 @@ static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 static void lpc18xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
-       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
 
        lpc18xx_pwm_writel(lpc18xx_pwm,
                           LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event), 0);
@@ -279,7 +282,7 @@ static void lpc18xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
-       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
        unsigned long event;
 
        event = find_first_zero_bit(&lpc18xx_pwm->event_map,
@@ -300,7 +303,7 @@ static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
-       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
 
        clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
 }
@@ -324,8 +327,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_pwm_of_match);
 static int lpc18xx_pwm_probe(struct platform_device *pdev)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm;
-       struct pwm_device *pwm;
-       int ret, i;
+       int ret;
        u64 val;
 
        lpc18xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*lpc18xx_pwm),
@@ -370,7 +372,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
 
        lpc18xx_pwm->chip.dev = &pdev->dev;
        lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
-       lpc18xx_pwm->chip.npwm = 16;
+       lpc18xx_pwm->chip.npwm = LPC18XX_NUM_PWMS;
 
        /* SCT counter must be in unify (32 bit) mode */
        lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
@@ -395,40 +397,23 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
        lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT,
                           BIT(lpc18xx_pwm->period_event));
 
+       val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
+       val &= ~LPC18XX_PWM_BIDIR;
+       val &= ~LPC18XX_PWM_CTRL_HALT;
+       val &= ~LPC18XX_PWM_PRE_MASK;
+       val |= LPC18XX_PWM_PRE(0);
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
+
        ret = pwmchip_add(&lpc18xx_pwm->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
                goto disable_pwmclk;
        }
 
-       for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
-               struct lpc18xx_pwm_data *data;
-
-               pwm = &lpc18xx_pwm->chip.pwms[i];
-
-               data = devm_kzalloc(lpc18xx_pwm->dev, sizeof(*data),
-                                   GFP_KERNEL);
-               if (!data) {
-                       ret = -ENOMEM;
-                       goto remove_pwmchip;
-               }
-
-               pwm_set_chip_data(pwm, data);
-       }
-
        platform_set_drvdata(pdev, lpc18xx_pwm);
 
-       val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
-       val &= ~LPC18XX_PWM_BIDIR;
-       val &= ~LPC18XX_PWM_CTRL_HALT;
-       val &= ~LPC18XX_PWM_PRE_MASK;
-       val |= LPC18XX_PWM_PRE(0);
-       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
-
        return 0;
 
-remove_pwmchip:
-       pwmchip_remove(&lpc18xx_pwm->chip);
 disable_pwmclk:
        clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
        return ret;
index 0d4dd80..568b13a 100644 (file)
@@ -146,7 +146,7 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
        if (clkdiv > PWM_CLK_DIV_MAX) {
                pwm_mediatek_clk_disable(chip, pwm);
-               dev_err(chip->dev, "period %d not supported\n", period_ns);
+               dev_err(chip->dev, "period of %d ns not supported\n", period_ns);
                return -EINVAL;
        }
 
@@ -221,24 +221,20 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
        if (IS_ERR(pc->regs))
                return PTR_ERR(pc->regs);
 
-       pc->clk_pwms = devm_kcalloc(&pdev->dev, pc->soc->num_pwms,
+       pc->clk_pwms = devm_kmalloc_array(&pdev->dev, pc->soc->num_pwms,
                                    sizeof(*pc->clk_pwms), GFP_KERNEL);
        if (!pc->clk_pwms)
                return -ENOMEM;
 
        pc->clk_top = devm_clk_get(&pdev->dev, "top");
-       if (IS_ERR(pc->clk_top)) {
-               dev_err(&pdev->dev, "clock: top fail: %ld\n",
-                       PTR_ERR(pc->clk_top));
-               return PTR_ERR(pc->clk_top);
-       }
+       if (IS_ERR(pc->clk_top))
+               return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_top),
+                                    "Failed to get top clock\n");
 
        pc->clk_main = devm_clk_get(&pdev->dev, "main");
-       if (IS_ERR(pc->clk_main)) {
-               dev_err(&pdev->dev, "clock: main fail: %ld\n",
-                       PTR_ERR(pc->clk_main));
-               return PTR_ERR(pc->clk_main);
-       }
+       if (IS_ERR(pc->clk_main))
+               return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_main),
+                                    "Failed to get main clock\n");
 
        for (i = 0; i < pc->soc->num_pwms; i++) {
                char name[8];
@@ -246,11 +242,9 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
                snprintf(name, sizeof(name), "pwm%d", i + 1);
 
                pc->clk_pwms[i] = devm_clk_get(&pdev->dev, name);
-               if (IS_ERR(pc->clk_pwms[i])) {
-                       dev_err(&pdev->dev, "clock: %s fail: %ld\n",
-                               name, PTR_ERR(pc->clk_pwms[i]));
-                       return PTR_ERR(pc->clk_pwms[i]);
-               }
+               if (IS_ERR(pc->clk_pwms[i]))
+                       return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_pwms[i]),
+                                            "Failed to get %s clock\n", name);
        }
 
        pc->chip.dev = &pdev->dev;
@@ -258,10 +252,8 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
        pc->chip.npwm = pc->soc->num_pwms;
 
        ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
 
        return 0;
 }
index 3cf3bcf..57112f4 100644 (file)
@@ -120,16 +120,10 @@ static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip)
 static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct meson_pwm *meson = to_meson_pwm(chip);
-       struct meson_pwm_channel *channel;
+       struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
        struct device *dev = chip->dev;
        int err;
 
-       channel = pwm_get_chip_data(pwm);
-       if (channel)
-               return 0;
-
-       channel = &meson->channels[pwm->hwpwm];
-
        if (channel->clk_parent) {
                err = clk_set_parent(channel->clk, channel->clk_parent);
                if (err < 0) {
@@ -147,21 +141,21 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
                return err;
        }
 
-       return pwm_set_chip_data(pwm, channel);
+       return 0;
 }
 
 static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+       struct meson_pwm *meson = to_meson_pwm(chip);
+       struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
 
-       if (channel)
-               clk_disable_unprepare(channel->clk);
+       clk_disable_unprepare(channel->clk);
 }
 
 static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
                          const struct pwm_state *state)
 {
-       struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+       struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
        unsigned int duty, period, pre_div, cnt, duty_cnt;
        unsigned long fin_freq;
 
@@ -224,7 +218,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
 
 static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
 {
-       struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+       struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
        struct meson_pwm_channel_data *channel_data;
        unsigned long flags;
        u32 value;
@@ -267,13 +261,10 @@ static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
 static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                           const struct pwm_state *state)
 {
-       struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
        struct meson_pwm *meson = to_meson_pwm(chip);
+       struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
        int err = 0;
 
-       if (!state)
-               return -EINVAL;
-
        if (!state->enabled) {
                if (state->polarity == PWM_POLARITY_INVERSED) {
                        /*
index c56001a..c91fa7f 100644 (file)
@@ -560,10 +560,10 @@ static int pca9685_pwm_probe(struct i2c_client *client,
        pca9685_write_reg(pca, PCA9685_MODE1, reg);
 
        /* Reset OFF/ON registers to POR default */
-       pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_L, LED_FULL);
+       pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_L, 0);
        pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_H, LED_FULL);
        pca9685_write_reg(pca, PCA9685_ALL_LED_ON_L, 0);
-       pca9685_write_reg(pca, PCA9685_ALL_LED_ON_H, 0);
+       pca9685_write_reg(pca, PCA9685_ALL_LED_ON_H, LED_FULL);
 
        pca->chip.ops = &pca9685_pwm_ops;
        /* Add an extra channel for ALL_LED */
index 238ec88..0bcaa58 100644 (file)
@@ -58,7 +58,7 @@ static inline struct pxa_pwm_chip *to_pxa_pwm_chip(struct pwm_chip *chip)
  * duty_ns   = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
  */
 static int pxa_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-                         int duty_ns, int period_ns)
+                         u64 duty_ns, u64 period_ns)
 {
        struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
        unsigned long long c;
@@ -84,7 +84,7 @@ static int pxa_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        if (duty_ns == period_ns)
                dc = PWMDCR_FD;
        else
-               dc = (pv + 1) * duty_ns / period_ns;
+               dc = mul_u64_u64_div_u64(pv + 1, duty_ns, period_ns);
 
        /* NOTE: the clock to PWM has to be enabled first
         * before writing to the registers
@@ -115,10 +115,33 @@ static void pxa_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        clk_disable_unprepare(pc->clk);
 }
 
+static int pxa_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                        const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       pxa_pwm_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = pxa_pwm_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               return pxa_pwm_enable(chip, pwm);
+
+       return 0;
+}
+
 static const struct pwm_ops pxa_pwm_ops = {
-       .config = pxa_pwm_config,
-       .enable = pxa_pwm_enable,
-       .disable = pxa_pwm_disable,
+       .apply = pxa_pwm_apply,
        .owner = THIS_MODULE,
 };
 
index 579a152..e52e29f 100644 (file)
@@ -163,7 +163,6 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
        rpipwm->firmware = firmware;
        rpipwm->chip.dev = dev;
        rpipwm->chip.ops = &raspberrypi_pwm_ops;
-       rpipwm->chip.base = -1;
        rpipwm->chip.npwm = RASPBERRYPI_FIRMWARE_PWM_NUM;
 
        ret = raspberrypi_pwm_get_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
index b437192..55f46d0 100644 (file)
@@ -110,7 +110,7 @@ static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
        unsigned long clk_rate = clk_get_rate(rp->clk);
        u32 cyc, ph;
 
-       one_cycle = (unsigned long long)NSEC_PER_SEC * 100ULL * (1 << div);
+       one_cycle = NSEC_PER_SEC * 100ULL << div;
        do_div(one_cycle, clk_rate);
 
        tmp = period_ns * 100ULL;
index 9dc983a..c4336d3 100644 (file)
@@ -269,19 +269,19 @@ static const struct pwm_ops stmpe_24xx_pwm_ops = {
 static int __init stmpe_pwm_probe(struct platform_device *pdev)
 {
        struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
-       struct stmpe_pwm *pwm;
+       struct stmpe_pwm *stmpe_pwm;
        int ret;
 
-       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
-       if (!pwm)
+       stmpe_pwm = devm_kzalloc(&pdev->dev, sizeof(*stmpe_pwm), GFP_KERNEL);
+       if (!stmpe_pwm)
                return -ENOMEM;
 
-       pwm->stmpe = stmpe;
-       pwm->chip.dev = &pdev->dev;
+       stmpe_pwm->stmpe = stmpe;
+       stmpe_pwm->chip.dev = &pdev->dev;
 
        if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) {
-               pwm->chip.ops = &stmpe_24xx_pwm_ops;
-               pwm->chip.npwm = 3;
+               stmpe_pwm->chip.ops = &stmpe_24xx_pwm_ops;
+               stmpe_pwm->chip.npwm = 3;
        } else {
                if (stmpe->partnum == STMPE1601)
                        dev_err(&pdev->dev, "STMPE1601 not yet supported\n");
@@ -295,14 +295,12 @@ static int __init stmpe_pwm_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       ret = pwmchip_add(&pwm->chip);
+       ret = pwmchip_add(&stmpe_pwm->chip);
        if (ret) {
                stmpe_disable(stmpe, STMPE_BLOCK_PWM);
                return ret;
        }
 
-       platform_set_drvdata(pdev, pwm);
-
        return 0;
 }
 
index 91ca676..16d75f9 100644 (file)
@@ -390,20 +390,20 @@ MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
 
 static int sun4i_pwm_probe(struct platform_device *pdev)
 {
-       struct sun4i_pwm_chip *pwm;
+       struct sun4i_pwm_chip *sun4ichip;
        int ret;
 
-       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
-       if (!pwm)
+       sun4ichip = devm_kzalloc(&pdev->dev, sizeof(*sun4ichip), GFP_KERNEL);
+       if (!sun4ichip)
                return -ENOMEM;
 
-       pwm->data = of_device_get_match_data(&pdev->dev);
-       if (!pwm->data)
+       sun4ichip->data = of_device_get_match_data(&pdev->dev);
+       if (!sun4ichip->data)
                return -ENODEV;
 
-       pwm->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(pwm->base))
-               return PTR_ERR(pwm->base);
+       sun4ichip->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(sun4ichip->base))
+               return PTR_ERR(sun4ichip->base);
 
        /*
         * All hardware variants need a source clock that is divided and
@@ -416,30 +416,30 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
         * unnamed one of the PWM device) and if this is not found we fall
         * back to the first clock of the PWM.
         */
-       pwm->clk = devm_clk_get_optional(&pdev->dev, "mod");
-       if (IS_ERR(pwm->clk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(pwm->clk),
+       sun4ichip->clk = devm_clk_get_optional(&pdev->dev, "mod");
+       if (IS_ERR(sun4ichip->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->clk),
                                     "get mod clock failed\n");
 
-       if (!pwm->clk) {
-               pwm->clk = devm_clk_get(&pdev->dev, NULL);
-               if (IS_ERR(pwm->clk))
-                       return dev_err_probe(&pdev->dev, PTR_ERR(pwm->clk),
+       if (!sun4ichip->clk) {
+               sun4ichip->clk = devm_clk_get(&pdev->dev, NULL);
+               if (IS_ERR(sun4ichip->clk))
+                       return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->clk),
                                             "get unnamed clock failed\n");
        }
 
-       pwm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
-       if (IS_ERR(pwm->bus_clk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(pwm->bus_clk),
+       sun4ichip->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
+       if (IS_ERR(sun4ichip->bus_clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->bus_clk),
                                     "get bus clock failed\n");
 
-       pwm->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
-       if (IS_ERR(pwm->rst))
-               return dev_err_probe(&pdev->dev, PTR_ERR(pwm->rst),
+       sun4ichip->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+       if (IS_ERR(sun4ichip->rst))
+               return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->rst),
                                     "get reset failed\n");
 
        /* Deassert reset */
-       ret = reset_control_deassert(pwm->rst);
+       ret = reset_control_deassert(sun4ichip->rst);
        if (ret) {
                dev_err(&pdev->dev, "cannot deassert reset control: %pe\n",
                        ERR_PTR(ret));
@@ -450,45 +450,45 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
         * We're keeping the bus clock on for the sake of simplicity.
         * Actually it only needs to be on for hardware register accesses.
         */
-       ret = clk_prepare_enable(pwm->bus_clk);
+       ret = clk_prepare_enable(sun4ichip->bus_clk);
        if (ret) {
                dev_err(&pdev->dev, "cannot prepare and enable bus_clk %pe\n",
                        ERR_PTR(ret));
                goto err_bus;
        }
 
-       pwm->chip.dev = &pdev->dev;
-       pwm->chip.ops = &sun4i_pwm_ops;
-       pwm->chip.npwm = pwm->data->npwm;
+       sun4ichip->chip.dev = &pdev->dev;
+       sun4ichip->chip.ops = &sun4i_pwm_ops;
+       sun4ichip->chip.npwm = sun4ichip->data->npwm;
 
-       spin_lock_init(&pwm->ctrl_lock);
+       spin_lock_init(&sun4ichip->ctrl_lock);
 
-       ret = pwmchip_add(&pwm->chip);
+       ret = pwmchip_add(&sun4ichip->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
                goto err_pwm_add;
        }
 
-       platform_set_drvdata(pdev, pwm);
+       platform_set_drvdata(pdev, sun4ichip);
 
        return 0;
 
 err_pwm_add:
-       clk_disable_unprepare(pwm->bus_clk);
+       clk_disable_unprepare(sun4ichip->bus_clk);
 err_bus:
-       reset_control_assert(pwm->rst);
+       reset_control_assert(sun4ichip->rst);
 
        return ret;
 }
 
 static int sun4i_pwm_remove(struct platform_device *pdev)
 {
-       struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
+       struct sun4i_pwm_chip *sun4ichip = platform_get_drvdata(pdev);
 
-       pwmchip_remove(&pwm->chip);
+       pwmchip_remove(&sun4ichip->chip);
 
-       clk_disable_unprepare(pwm->bus_clk);
-       reset_control_assert(pwm->rst);
+       clk_disable_unprepare(sun4ichip->bus_clk);
+       reset_control_assert(sun4ichip->rst);
 
        return 0;
 }
index 18cf974..e5a9ffe 100644 (file)
@@ -85,15 +85,14 @@ static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
        return container_of(chip, struct tegra_pwm_chip, chip);
 }
 
-static inline u32 pwm_readl(struct tegra_pwm_chip *chip, unsigned int num)
+static inline u32 pwm_readl(struct tegra_pwm_chip *pc, unsigned int offset)
 {
-       return readl(chip->regs + (num << 4));
+       return readl(pc->regs + (offset << 4));
 }
 
-static inline void pwm_writel(struct tegra_pwm_chip *chip, unsigned int num,
-                            unsigned long val)
+static inline void pwm_writel(struct tegra_pwm_chip *pc, unsigned int offset, u32 value)
 {
-       writel(val, chip->regs + (num << 4));
+       writel(value, pc->regs + (offset << 4));
 }
 
 static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -240,25 +239,25 @@ static const struct pwm_ops tegra_pwm_ops = {
 
 static int tegra_pwm_probe(struct platform_device *pdev)
 {
-       struct tegra_pwm_chip *pwm;
+       struct tegra_pwm_chip *pc;
        int ret;
 
-       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
-       if (!pwm)
+       pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+       if (!pc)
                return -ENOMEM;
 
-       pwm->soc = of_device_get_match_data(&pdev->dev);
-       pwm->dev = &pdev->dev;
+       pc->soc = of_device_get_match_data(&pdev->dev);
+       pc->dev = &pdev->dev;
 
-       pwm->regs = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(pwm->regs))
-               return PTR_ERR(pwm->regs);
+       pc->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(pc->regs))
+               return PTR_ERR(pc->regs);
 
-       platform_set_drvdata(pdev, pwm);
+       platform_set_drvdata(pdev, pc);
 
-       pwm->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(pwm->clk))
-               return PTR_ERR(pwm->clk);
+       pc->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(pc->clk))
+               return PTR_ERR(pc->clk);
 
        ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
        if (ret)
@@ -270,7 +269,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
                return ret;
 
        /* Set maximum frequency of the IP */
-       ret = dev_pm_opp_set_rate(pwm->dev, pwm->soc->max_frequency);
+       ret = dev_pm_opp_set_rate(pc->dev, pc->soc->max_frequency);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret);
                goto put_pm;
@@ -281,29 +280,29 @@ static int tegra_pwm_probe(struct platform_device *pdev)
         * clock register resolutions. Get the configured frequency
         * so that PWM period can be calculated more accurately.
         */
-       pwm->clk_rate = clk_get_rate(pwm->clk);
+       pc->clk_rate = clk_get_rate(pc->clk);
 
        /* Set minimum limit of PWM period for the IP */
-       pwm->min_period_ns =
-           (NSEC_PER_SEC / (pwm->soc->max_frequency >> PWM_DUTY_WIDTH)) + 1;
+       pc->min_period_ns =
+           (NSEC_PER_SEC / (pc->soc->max_frequency >> PWM_DUTY_WIDTH)) + 1;
 
-       pwm->rst = devm_reset_control_get_exclusive(&pdev->dev, "pwm");
-       if (IS_ERR(pwm->rst)) {
-               ret = PTR_ERR(pwm->rst);
+       pc->rst = devm_reset_control_get_exclusive(&pdev->dev, "pwm");
+       if (IS_ERR(pc->rst)) {
+               ret = PTR_ERR(pc->rst);
                dev_err(&pdev->dev, "Reset control is not found: %d\n", ret);
                goto put_pm;
        }
 
-       reset_control_deassert(pwm->rst);
+       reset_control_deassert(pc->rst);
 
-       pwm->chip.dev = &pdev->dev;
-       pwm->chip.ops = &tegra_pwm_ops;
-       pwm->chip.npwm = pwm->soc->num_channels;
+       pc->chip.dev = &pdev->dev;
+       pc->chip.ops = &tegra_pwm_ops;
+       pc->chip.npwm = pc->soc->num_channels;
 
-       ret = pwmchip_add(&pwm->chip);
+       ret = pwmchip_add(&pc->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
-               reset_control_assert(pwm->rst);
+               reset_control_assert(pc->rst);
                goto put_pm;
        }
 
index 5b723a4..48ca0ff 100644 (file)
@@ -216,7 +216,7 @@ static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan)
  * duty_ns   = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE
  */
 static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-                            int duty_ns, int period_ns)
+                            u64 duty_ns, u64 period_ns)
 {
        struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
        u32 period_cycles, duty_cycles;
@@ -401,12 +401,42 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
        pc->period_cycles[pwm->hwpwm] = 0;
 }
 
+static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                           const struct pwm_state *state)
+{
+       int err;
+       bool enabled = pwm->state.enabled;
+
+       if (state->polarity != pwm->state.polarity) {
+               if (enabled) {
+                       ehrpwm_pwm_disable(chip, pwm);
+                       enabled = false;
+               }
+
+               err = ehrpwm_pwm_set_polarity(chip, pwm, state->polarity);
+               if (err)
+                       return err;
+       }
+
+       if (!state->enabled) {
+               if (enabled)
+                       ehrpwm_pwm_disable(chip, pwm);
+               return 0;
+       }
+
+       err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!enabled)
+               err = ehrpwm_pwm_enable(chip, pwm);
+
+       return err;
+}
+
 static const struct pwm_ops ehrpwm_pwm_ops = {
        .free = ehrpwm_pwm_free,
-       .config = ehrpwm_pwm_config,
-       .set_polarity = ehrpwm_pwm_set_polarity,
-       .enable = ehrpwm_pwm_enable,
-       .disable = ehrpwm_pwm_disable,
+       .apply = ehrpwm_pwm_apply,
        .owner = THIS_MODULE,
 };
 
index 7170a31..f1ff994 100644 (file)
@@ -235,7 +235,7 @@ MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
 
 static int vt8500_pwm_probe(struct platform_device *pdev)
 {
-       struct vt8500_chip *chip;
+       struct vt8500_chip *vt8500;
        struct device_node *np = pdev->dev.of_node;
        int ret;
 
@@ -244,48 +244,48 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
-       if (chip == NULL)
+       vt8500 = devm_kzalloc(&pdev->dev, sizeof(*vt8500), GFP_KERNEL);
+       if (vt8500 == NULL)
                return -ENOMEM;
 
-       chip->chip.dev = &pdev->dev;
-       chip->chip.ops = &vt8500_pwm_ops;
-       chip->chip.npwm = VT8500_NR_PWMS;
+       vt8500->chip.dev = &pdev->dev;
+       vt8500->chip.ops = &vt8500_pwm_ops;
+       vt8500->chip.npwm = VT8500_NR_PWMS;
 
-       chip->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(chip->clk)) {
+       vt8500->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(vt8500->clk)) {
                dev_err(&pdev->dev, "clock source not specified\n");
-               return PTR_ERR(chip->clk);
+               return PTR_ERR(vt8500->clk);
        }
 
-       chip->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(chip->base))
-               return PTR_ERR(chip->base);
+       vt8500->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(vt8500->base))
+               return PTR_ERR(vt8500->base);
 
-       ret = clk_prepare(chip->clk);
+       ret = clk_prepare(vt8500->clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to prepare clock\n");
                return ret;
        }
 
-       ret = pwmchip_add(&chip->chip);
+       ret = pwmchip_add(&vt8500->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to add PWM chip\n");
-               clk_unprepare(chip->clk);
+               clk_unprepare(vt8500->clk);
                return ret;
        }
 
-       platform_set_drvdata(pdev, chip);
+       platform_set_drvdata(pdev, vt8500);
        return ret;
 }
 
 static int vt8500_pwm_remove(struct platform_device *pdev)
 {
-       struct vt8500_chip *chip = platform_get_drvdata(pdev);
+       struct vt8500_chip *vt8500 = platform_get_drvdata(pdev);
 
-       pwmchip_remove(&chip->chip);
+       pwmchip_remove(&vt8500->chip);
 
-       clk_unprepare(chip->clk);
+       clk_unprepare(vt8500->clk);
 
        return 0;
 }
index 676b041..2016062 100644 (file)
@@ -106,6 +106,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
                .vsel_reg = RT4831_REG_VLCM,
                .vsel_mask = RT4831_VOLT_MASK,
                .bypass_reg = RT4831_REG_DSVEN,
+               .bypass_mask = RT4831_DSVMODE_MASK,
                .bypass_val_on = DSV_MODE_BYPASS,
                .bypass_val_off = DSV_MODE_NORMAL,
                .owner = THIS_MODULE,
@@ -126,6 +127,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
                .enable_mask = RT4831_POSEN_MASK,
                .active_discharge_reg = RT4831_REG_DSVEN,
                .active_discharge_mask = RT4831_POSADEN_MASK,
+               .active_discharge_on = RT4831_POSADEN_MASK,
                .owner = THIS_MODULE,
        },
        {
@@ -144,6 +146,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
                .enable_mask = RT4831_NEGEN_MASK,
                .active_discharge_reg = RT4831_REG_DSVEN,
                .active_discharge_mask = RT4831_NEGADEN_MASK,
+               .active_discharge_on = RT4831_NEGADEN_MASK,
                .owner = THIS_MODULE,
        }
 };
index 5ff3867..71ce497 100644 (file)
@@ -32,6 +32,9 @@
 #define MT8183_SCP_CACHESIZE_8KB       BIT(8)
 #define MT8183_SCP_CACHE_CON_WAYEN     BIT(10)
 
+#define MT8186_SCP_L1_SRAM_PD_P1       0x40B0
+#define MT8186_SCP_L1_SRAM_PD_p2       0x40B4
+
 #define MT8192_L2TCM_SRAM_PD_0         0x10C0
 #define MT8192_L2TCM_SRAM_PD_1         0x10C4
 #define MT8192_L2TCM_SRAM_PD_2         0x10C8
index 36e48cf..3860915 100644 (file)
@@ -383,6 +383,27 @@ static void mt8192_power_off_sram(void __iomem *addr)
                writel(GENMASK(i, 0), addr);
 }
 
+static int mt8186_scp_before_load(struct mtk_scp *scp)
+{
+       /* Clear SCP to host interrupt */
+       writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+       /* Reset clocks before loading FW */
+       writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+       writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+       /* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/
+       mt8192_power_on_sram(scp->reg_base + MT8183_SCP_SRAM_PDN);
+
+       /* Initialize TCM before loading FW. */
+       writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+       writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+       writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
+       writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+
+       return 0;
+}
+
 static int mt8192_scp_before_load(struct mtk_scp *scp)
 {
        /* clear SPM interrupt, SCP2SPM_IPC_CLR */
@@ -756,15 +777,9 @@ static int scp_probe(struct platform_device *pdev)
        char *fw_name = "scp.img";
        int ret, i;
 
-       rproc = rproc_alloc(dev,
-                           np->name,
-                           &scp_ops,
-                           fw_name,
-                           sizeof(*scp));
-       if (!rproc) {
-               dev_err(dev, "unable to allocate remoteproc\n");
-               return -ENOMEM;
-       }
+       rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
+       if (!rproc)
+               return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
 
        scp = (struct mtk_scp *)rproc->priv;
        scp->rproc = rproc;
@@ -774,46 +789,42 @@ static int scp_probe(struct platform_device *pdev)
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
        scp->sram_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR((__force void *)scp->sram_base)) {
-               dev_err(dev, "Failed to parse and map sram memory\n");
-               ret = PTR_ERR((__force void *)scp->sram_base);
-               goto free_rproc;
-       }
+       if (IS_ERR(scp->sram_base))
+               return dev_err_probe(dev, PTR_ERR(scp->sram_base),
+                                    "Failed to parse and map sram memory\n");
+
        scp->sram_size = resource_size(res);
        scp->sram_phys = res->start;
 
        /* l1tcm is an optional memory region */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
        scp->l1tcm_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR((__force void *)scp->l1tcm_base)) {
-               ret = PTR_ERR((__force void *)scp->l1tcm_base);
+       if (IS_ERR(scp->l1tcm_base)) {
+               ret = PTR_ERR(scp->l1tcm_base);
                if (ret != -EINVAL) {
-                       dev_err(dev, "Failed to map l1tcm memory\n");
-                       goto free_rproc;
+                       return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
                }
        } else {
                scp->l1tcm_size = resource_size(res);
                scp->l1tcm_phys = res->start;
        }
 
-       mutex_init(&scp->send_lock);
-       for (i = 0; i < SCP_IPI_MAX; i++)
-               mutex_init(&scp->ipi_desc[i].lock);
-
        scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
-       if (IS_ERR((__force void *)scp->reg_base)) {
-               dev_err(dev, "Failed to parse and map cfg memory\n");
-               ret = PTR_ERR((__force void *)scp->reg_base);
-               goto destroy_mutex;
-       }
+       if (IS_ERR(scp->reg_base))
+               return dev_err_probe(dev, PTR_ERR(scp->reg_base),
+                                    "Failed to parse and map cfg memory\n");
 
-       ret = scp_map_memory_region(scp);
+       ret = scp->data->scp_clk_get(scp);
        if (ret)
-               goto destroy_mutex;
+               return ret;
 
-       ret = scp->data->scp_clk_get(scp);
+       ret = scp_map_memory_region(scp);
        if (ret)
-               goto release_dev_mem;
+               return ret;
+
+       mutex_init(&scp->send_lock);
+       for (i = 0; i < SCP_IPI_MAX; i++)
+               mutex_init(&scp->ipi_desc[i].lock);
 
        /* register SCP initialization IPI */
        ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
@@ -847,12 +858,9 @@ remove_subdev:
        scp_ipi_unregister(scp, SCP_IPI_INIT);
 release_dev_mem:
        scp_unmap_memory_region(scp);
-destroy_mutex:
        for (i = 0; i < SCP_IPI_MAX; i++)
                mutex_destroy(&scp->ipi_desc[i].lock);
        mutex_destroy(&scp->send_lock);
-free_rproc:
-       rproc_free(rproc);
 
        return ret;
 }
@@ -887,6 +895,19 @@ static const struct mtk_scp_of_data mt8183_of_data = {
        .ipi_buf_offset = 0x7bdb0,
 };
 
+static const struct mtk_scp_of_data mt8186_of_data = {
+       .scp_clk_get = mt8195_scp_clk_get,
+       .scp_before_load = mt8186_scp_before_load,
+       .scp_irq_handler = mt8183_scp_irq_handler,
+       .scp_reset_assert = mt8183_scp_reset_assert,
+       .scp_reset_deassert = mt8183_scp_reset_deassert,
+       .scp_stop = mt8183_scp_stop,
+       .scp_da_to_va = mt8183_scp_da_to_va,
+       .host_to_scp_reg = MT8183_HOST_TO_SCP,
+       .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+       .ipi_buf_offset = 0x7bdb0,
+};
+
 static const struct mtk_scp_of_data mt8192_of_data = {
        .scp_clk_get = mt8192_scp_clk_get,
        .scp_before_load = mt8192_scp_before_load,
@@ -913,6 +934,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
 
 static const struct of_device_id mtk_scp_of_match[] = {
        { .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
+       { .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
        { .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
        { .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
        {},
index 442a388..5280ec9 100644 (file)
@@ -8,6 +8,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
+#include <linux/interconnect.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/soc/qcom/qcom_aoss.h>
@@ -51,9 +52,17 @@ int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5)
 {
        int ret;
 
+       ret = icc_set_bw(q6v5->path, 0, UINT_MAX);
+       if (ret < 0) {
+               dev_err(q6v5->dev, "failed to set bandwidth request\n");
+               return ret;
+       }
+
        ret = q6v5_load_state_toggle(q6v5, true);
-       if (ret)
+       if (ret) {
+               icc_set_bw(q6v5->path, 0, 0);
                return ret;
+       }
 
        reinit_completion(&q6v5->start_done);
        reinit_completion(&q6v5->stop_done);
@@ -78,6 +87,9 @@ int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5)
        disable_irq(q6v5->handover_irq);
        q6v5_load_state_toggle(q6v5, false);
 
+       /* Disable interconnect vote, in case handover never happened */
+       icc_set_bw(q6v5->path, 0, 0);
+
        return !q6v5->handover_issued;
 }
 EXPORT_SYMBOL_GPL(qcom_q6v5_unprepare);
@@ -160,6 +172,8 @@ static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
        if (q6v5->handover)
                q6v5->handover(q6v5);
 
+       icc_set_bw(q6v5->path, 0, 0);
+
        q6v5->handover_issued = true;
 
        return IRQ_HANDLED;
@@ -332,6 +346,11 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
                return load_state ? -ENOMEM : -EINVAL;
        }
 
+       q6v5->path = devm_of_icc_get(&pdev->dev, NULL);
+       if (IS_ERR(q6v5->path))
+               return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->path),
+                                    "failed to acquire interconnect path\n");
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(qcom_q6v5_init);
index f35e044..5a859c4 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/completion.h>
 #include <linux/soc/qcom/qcom_aoss.h>
 
+struct icc_path;
 struct rproc;
 struct qcom_smem_state;
 struct qcom_sysmon;
@@ -18,6 +19,8 @@ struct qcom_q6v5 {
        struct qcom_smem_state *state;
        struct qmp *qmp;
 
+       struct icc_path *path;
+
        unsigned stop_bit;
 
        int wdog_irq;
index 098362e..2f3b9f5 100644 (file)
@@ -32,6 +32,7 @@
 
 /* time out value */
 #define ACK_TIMEOUT                    1000
+#define ACK_TIMEOUT_US                 1000000
 #define BOOT_FSM_TIMEOUT               10000
 /* mask values */
 #define EVB_MASK                       GENMASK(27, 4)
@@ -51,6 +52,8 @@
 #define QDSP6SS_CORE_CBCR      0x20
 #define QDSP6SS_SLEEP_CBCR     0x3c
 
+#define QCOM_Q6V5_RPROC_PROXY_PD_MAX   3
+
 struct adsp_pil_data {
        int crash_reason_smem;
        const char *firmware_name;
@@ -58,9 +61,13 @@ struct adsp_pil_data {
        const char *ssr_name;
        const char *sysmon_name;
        int ssctl_id;
+       bool is_wpss;
+       bool auto_boot;
 
        const char **clk_ids;
        int num_clks;
+       const char **proxy_pd_names;
+       const char *load_state;
 };
 
 struct qcom_adsp {
@@ -93,11 +100,151 @@ struct qcom_adsp {
        void *mem_region;
        size_t mem_size;
 
+       struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX];
+       size_t proxy_pd_count;
+
        struct qcom_rproc_glink glink_subdev;
        struct qcom_rproc_ssr ssr_subdev;
        struct qcom_sysmon *sysmon;
+
+       int (*shutdown)(struct qcom_adsp *adsp);
 };
 
+static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp,
+                                const char **pd_names)
+{
+       struct device **devs = adsp->proxy_pds;
+       size_t num_pds = 0;
+       int ret;
+       int i;
+
+       if (!pd_names)
+               return 0;
+
+       /* Handle single power domain */
+       if (dev->pm_domain) {
+               devs[0] = dev;
+               pm_runtime_enable(dev);
+               return 1;
+       }
+
+       while (pd_names[num_pds])
+               num_pds++;
+
+       if (num_pds > ARRAY_SIZE(adsp->proxy_pds))
+               return -E2BIG;
+
+       for (i = 0; i < num_pds; i++) {
+               devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+               if (IS_ERR_OR_NULL(devs[i])) {
+                       ret = PTR_ERR(devs[i]) ? : -ENODATA;
+                       goto unroll_attach;
+               }
+       }
+
+       return num_pds;
+
+unroll_attach:
+       for (i--; i >= 0; i--)
+               dev_pm_domain_detach(devs[i], false);
+
+       return ret;
+}
+
+static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+                                 size_t pd_count)
+{
+       struct device *dev = adsp->dev;
+       int i;
+
+       /* Handle single power domain */
+       if (dev->pm_domain && pd_count) {
+               pm_runtime_disable(dev);
+               return;
+       }
+
+       for (i = 0; i < pd_count; i++)
+               dev_pm_domain_detach(pds[i], false);
+}
+
+static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+                                size_t pd_count)
+{
+       int ret;
+       int i;
+
+       for (i = 0; i < pd_count; i++) {
+               dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+               ret = pm_runtime_get_sync(pds[i]);
+               if (ret < 0) {
+                       pm_runtime_put_noidle(pds[i]);
+                       dev_pm_genpd_set_performance_state(pds[i], 0);
+                       goto unroll_pd_votes;
+               }
+       }
+
+       return 0;
+
+unroll_pd_votes:
+       for (i--; i >= 0; i--) {
+               dev_pm_genpd_set_performance_state(pds[i], 0);
+               pm_runtime_put(pds[i]);
+       }
+
+       return ret;
+}
+
+static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+                                  size_t pd_count)
+{
+       int i;
+
+       for (i = 0; i < pd_count; i++) {
+               dev_pm_genpd_set_performance_state(pds[i], 0);
+               pm_runtime_put(pds[i]);
+       }
+}
+
+static int qcom_wpss_shutdown(struct qcom_adsp *adsp)
+{
+       unsigned int val;
+
+       regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
+
+       /* Wait for halt ACK from QDSP6 */
+       regmap_read_poll_timeout(adsp->halt_map,
+                                adsp->halt_lpass + LPASS_HALTACK_REG, val,
+                                val, 1000, ACK_TIMEOUT_US);
+
+       /* Assert the WPSS PDC Reset */
+       reset_control_assert(adsp->pdc_sync_reset);
+
+       /* Place the WPSS processor into reset */
+       reset_control_assert(adsp->restart);
+
+       /* wait after asserting subsystem restart from AOSS */
+       usleep_range(200, 205);
+
+       /* Remove the WPSS reset */
+       reset_control_deassert(adsp->restart);
+
+       /* De-assert the WPSS PDC Reset */
+       reset_control_deassert(adsp->pdc_sync_reset);
+
+       usleep_range(100, 105);
+
+       clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+
+       regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
+
+       /* Wait for halt ACK from QDSP6 */
+       regmap_read_poll_timeout(adsp->halt_map,
+                                adsp->halt_lpass + LPASS_HALTACK_REG, val,
+                                !val, 1000, ACK_TIMEOUT_US);
+
+       return 0;
+}
+
 static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
 {
        unsigned long timeout;
@@ -193,12 +340,10 @@ static int adsp_start(struct rproc *rproc)
        if (ret)
                goto disable_irqs;
 
-       dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
-       ret = pm_runtime_get_sync(adsp->dev);
-       if (ret) {
-               pm_runtime_put_noidle(adsp->dev);
+       ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds,
+                                   adsp->proxy_pd_count);
+       if (ret < 0)
                goto disable_xo_clk;
-       }
 
        ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
        if (ret) {
@@ -243,8 +388,7 @@ static int adsp_start(struct rproc *rproc)
 disable_adsp_clks:
        clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
 disable_power_domain:
-       dev_pm_genpd_set_performance_state(adsp->dev, 0);
-       pm_runtime_put(adsp->dev);
+       qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
 disable_xo_clk:
        clk_disable_unprepare(adsp->xo);
 disable_irqs:
@@ -258,8 +402,7 @@ static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
        struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
 
        clk_disable_unprepare(adsp->xo);
-       dev_pm_genpd_set_performance_state(adsp->dev, 0);
-       pm_runtime_put(adsp->dev);
+       qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
 }
 
 static int adsp_stop(struct rproc *rproc)
@@ -272,7 +415,7 @@ static int adsp_stop(struct rproc *rproc)
        if (ret == -ETIMEDOUT)
                dev_err(adsp->dev, "timed out on wait\n");
 
-       ret = qcom_adsp_shutdown(adsp);
+       ret = adsp->shutdown(adsp);
        if (ret)
                dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
 
@@ -408,6 +551,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
        }
 
        ret = of_address_to_resource(node, 0, &r);
+       of_node_put(node);
        if (ret)
                return ret;
 
@@ -427,6 +571,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
 static int adsp_probe(struct platform_device *pdev)
 {
        const struct adsp_pil_data *desc;
+       const char *firmware_name;
        struct qcom_adsp *adsp;
        struct rproc *rproc;
        int ret;
@@ -435,12 +580,22 @@ static int adsp_probe(struct platform_device *pdev)
        if (!desc)
                return -EINVAL;
 
+       firmware_name = desc->firmware_name;
+       ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+                                     &firmware_name);
+       if (ret < 0 && ret != -EINVAL) {
+               dev_err(&pdev->dev, "unable to read firmware-name\n");
+               return ret;
+       }
+
        rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
-                           desc->firmware_name, sizeof(*adsp));
+                           firmware_name, sizeof(*adsp));
        if (!rproc) {
                dev_err(&pdev->dev, "unable to allocate remoteproc\n");
                return -ENOMEM;
        }
+
+       rproc->auto_boot = desc->auto_boot;
        rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
 
        adsp = (struct qcom_adsp *)rproc->priv;
@@ -449,6 +604,11 @@ static int adsp_probe(struct platform_device *pdev)
        adsp->info_name = desc->sysmon_name;
        platform_set_drvdata(pdev, adsp);
 
+       if (desc->is_wpss)
+               adsp->shutdown = qcom_wpss_shutdown;
+       else
+               adsp->shutdown = qcom_adsp_shutdown;
+
        ret = adsp_alloc_memory_region(adsp);
        if (ret)
                goto free_rproc;
@@ -457,7 +617,13 @@ static int adsp_probe(struct platform_device *pdev)
        if (ret)
                goto free_rproc;
 
-       pm_runtime_enable(adsp->dev);
+       ret = qcom_rproc_pds_attach(adsp->dev, adsp,
+                                   desc->proxy_pd_names);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
+               goto free_rproc;
+       }
+       adsp->proxy_pd_count = ret;
 
        ret = adsp_init_reset(adsp);
        if (ret)
@@ -467,8 +633,8 @@ static int adsp_probe(struct platform_device *pdev)
        if (ret)
                goto disable_pm;
 
-       ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, NULL,
-                            qcom_adsp_pil_handover);
+       ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
+                            desc->load_state, qcom_adsp_pil_handover);
        if (ret)
                goto disable_pm;
 
@@ -489,7 +655,8 @@ static int adsp_probe(struct platform_device *pdev)
        return 0;
 
 disable_pm:
-       pm_runtime_disable(adsp->dev);
+       qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+
 free_rproc:
        rproc_free(rproc);
 
@@ -506,7 +673,7 @@ static int adsp_remove(struct platform_device *pdev)
        qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
        qcom_remove_sysmon_subdev(adsp->sysmon);
        qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
-       pm_runtime_disable(adsp->dev);
+       qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
        rproc_free(adsp->rproc);
 
        return 0;
@@ -518,11 +685,16 @@ static const struct adsp_pil_data adsp_resource_init = {
        .ssr_name = "lpass",
        .sysmon_name = "adsp",
        .ssctl_id = 0x14,
+       .is_wpss = false,
+       .auto_boot = true,
        .clk_ids = (const char*[]) {
                "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
                "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL
        },
        .num_clks = 7,
+       .proxy_pd_names = (const char*[]) {
+               "cx", NULL
+       },
 };
 
 static const struct adsp_pil_data cdsp_resource_init = {
@@ -531,15 +703,39 @@ static const struct adsp_pil_data cdsp_resource_init = {
        .ssr_name = "cdsp",
        .sysmon_name = "cdsp",
        .ssctl_id = 0x17,
+       .is_wpss = false,
+       .auto_boot = true,
        .clk_ids = (const char*[]) {
                "sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master",
                "q6_axim", NULL
        },
        .num_clks = 7,
+       .proxy_pd_names = (const char*[]) {
+               "cx", NULL
+       },
+};
+
+static const struct adsp_pil_data wpss_resource_init = {
+       .crash_reason_smem = 626,
+       .firmware_name = "wpss.mdt",
+       .ssr_name = "wpss",
+       .sysmon_name = "wpss",
+       .ssctl_id = 0x19,
+       .is_wpss = true,
+       .auto_boot = false,
+       .load_state = "wpss",
+       .clk_ids = (const char*[]) {
+               "ahb_bdg", "ahb", "rscp", NULL
+       },
+       .num_clks = 3,
+       .proxy_pd_names = (const char*[]) {
+               "cx", "mx", NULL
+       },
 };
 
 static const struct of_device_id adsp_of_match[] = {
        { .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init },
+       { .compatible = "qcom,sc7280-wpss-pil", .data = &wpss_resource_init },
        { .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
        { },
 };
index a2c231a..af217de 100644 (file)
@@ -218,6 +218,7 @@ struct q6v5 {
        struct qcom_rproc_subdev smd_subdev;
        struct qcom_rproc_ssr ssr_subdev;
        struct qcom_sysmon *sysmon;
+       struct platform_device *bam_dmux;
        bool need_mem_protection;
        bool has_alt_reset;
        bool has_mba_logs;
@@ -1807,18 +1808,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
         * reserved memory regions from device's memory-region property.
         */
        child = of_get_child_by_name(qproc->dev->of_node, "mba");
-       if (!child)
+       if (!child) {
                node = of_parse_phandle(qproc->dev->of_node,
                                        "memory-region", 0);
-       else
+       } else {
                node = of_parse_phandle(child, "memory-region", 0);
+               of_node_put(child);
+       }
 
        ret = of_address_to_resource(node, 0, &r);
+       of_node_put(node);
        if (ret) {
                dev_err(qproc->dev, "unable to resolve mba region\n");
                return ret;
        }
-       of_node_put(node);
 
        qproc->mba_phys = r.start;
        qproc->mba_size = resource_size(&r);
@@ -1829,14 +1832,15 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
        } else {
                child = of_get_child_by_name(qproc->dev->of_node, "mpss");
                node = of_parse_phandle(child, "memory-region", 0);
+               of_node_put(child);
        }
 
        ret = of_address_to_resource(node, 0, &r);
+       of_node_put(node);
        if (ret) {
                dev_err(qproc->dev, "unable to resolve mpss region\n");
                return ret;
        }
-       of_node_put(node);
 
        qproc->mpss_phys = qproc->mpss_reloc = r.start;
        qproc->mpss_size = resource_size(&r);
@@ -1847,6 +1851,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
 static int q6v5_probe(struct platform_device *pdev)
 {
        const struct rproc_hexagon_res *desc;
+       struct device_node *node;
        struct q6v5 *qproc;
        struct rproc *rproc;
        const char *mba_image;
@@ -1990,6 +1995,10 @@ static int q6v5_probe(struct platform_device *pdev)
        if (ret)
                goto remove_sysmon_subdev;
 
+       node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
+       qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
+       of_node_put(node);
+
        return 0;
 
 remove_sysmon_subdev:
@@ -2011,6 +2020,8 @@ static int q6v5_remove(struct platform_device *pdev)
        struct q6v5 *qproc = platform_get_drvdata(pdev);
        struct rproc *rproc = qproc->rproc;
 
+       if (qproc->bam_dmux)
+               of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
        rproc_del(rproc);
 
        qcom_q6v5_deinit(&qproc->q6v5);
index 80bbafe..9a223d3 100644 (file)
@@ -500,6 +500,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
        }
 
        ret = of_address_to_resource(node, 0, &r);
+       of_node_put(node);
        if (ret)
                return ret;
 
index 4ad98b0..906ff3c 100644 (file)
@@ -42,7 +42,7 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_
                    rproc->state != RPROC_ATTACHED)
                        return -EINVAL;
 
-               rproc_shutdown(rproc);
+               ret = rproc_shutdown(rproc);
        } else if (!strncmp(cmd, "detach", len)) {
                if (rproc->state != RPROC_ATTACHED)
                        return -EINVAL;
index 69f51ac..c510125 100644 (file)
@@ -2061,16 +2061,18 @@ EXPORT_SYMBOL(rproc_boot);
  *   which means that the @rproc handle stays valid even after rproc_shutdown()
  *   returns, and users can still use it with a subsequent rproc_boot(), if
  *   needed.
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
  */
-void rproc_shutdown(struct rproc *rproc)
+int rproc_shutdown(struct rproc *rproc)
 {
        struct device *dev = &rproc->dev;
-       int ret;
+       int ret = 0;
 
        ret = mutex_lock_interruptible(&rproc->lock);
        if (ret) {
                dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
-               return;
+               return ret;
        }
 
        /* if the remote proc is still needed, bail out */
@@ -2097,6 +2099,7 @@ void rproc_shutdown(struct rproc *rproc)
        rproc->table_ptr = NULL;
 out:
        mutex_unlock(&rproc->lock);
+       return ret;
 }
 EXPORT_SYMBOL(rproc_shutdown);
 
index b5a1e3b..5819304 100644 (file)
@@ -76,7 +76,7 @@ static ssize_t rproc_coredump_write(struct file *filp,
        int ret, err = 0;
        char buf[20];
 
-       if (count > sizeof(buf))
+       if (count < 1 || count > sizeof(buf))
                return -EINVAL;
 
        ret = copy_from_user(buf, user_buf, count);
index a328e63..72d4d3d 100644 (file)
@@ -84,7 +84,6 @@ static inline void  rproc_char_device_remove(struct rproc *rproc)
 void rproc_free_vring(struct rproc_vring *rvring);
 int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
 
-void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
 phys_addr_t rproc_va_to_pa(void *cpu_addr);
 int rproc_trigger_recovery(struct rproc *rproc);
 
index ea8b89f..51a04bc 100644 (file)
@@ -206,7 +206,7 @@ static ssize_t state_store(struct device *dev,
                    rproc->state != RPROC_ATTACHED)
                        return -EINVAL;
 
-               rproc_shutdown(rproc);
+               ret = rproc_shutdown(rproc);
        } else if (sysfs_streq(buf, "detach")) {
                if (rproc->state != RPROC_ATTACHED)
                        return -EINVAL;
@@ -230,6 +230,22 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(name);
 
+static umode_t rproc_is_visible(struct kobject *kobj, struct attribute *attr,
+                               int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct rproc *rproc = to_rproc(dev);
+       umode_t mode = attr->mode;
+
+       if (rproc->sysfs_read_only && (attr == &dev_attr_recovery.attr ||
+                                      attr == &dev_attr_firmware.attr ||
+                                      attr == &dev_attr_state.attr ||
+                                      attr == &dev_attr_coredump.attr))
+               mode = 0444;
+
+       return mode;
+}
+
 static struct attribute *rproc_attrs[] = {
        &dev_attr_coredump.attr,
        &dev_attr_recovery.attr,
@@ -240,7 +256,8 @@ static struct attribute *rproc_attrs[] = {
 };
 
 static const struct attribute_group rproc_devgroup = {
-       .attrs = rproc_attrs
+       .attrs = rproc_attrs,
+       .is_visible = rproc_is_visible,
 };
 
 static const struct attribute_group *rproc_devgroups[] = {
index 939c5d9..eb9c64f 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * TI K3 DSP Remote Processor(s) driver
  *
- * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
  *     Suman Anna <s-anna@ti.com>
  */
 
@@ -216,6 +216,43 @@ lreset:
        return ret;
 }
 
+static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct mbox_client *client = &kproc->client;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       client->dev = dev;
+       client->tx_done = NULL;
+       client->rx_callback = k3_dsp_rproc_mbox_callback;
+       client->tx_block = false;
+       client->knows_txdone = false;
+
+       kproc->mbox = mbox_request_channel(client, 0);
+       if (IS_ERR(kproc->mbox)) {
+               ret = -EBUSY;
+               dev_err(dev, "mbox_request_channel failed: %ld\n",
+                       PTR_ERR(kproc->mbox));
+               return ret;
+       }
+
+       /*
+        * Ping the remote processor, this is only for sanity-sake for now;
+        * there is no functional effect whatsoever.
+        *
+        * Note that the reply will _not_ arrive immediately: this message
+        * will wait in the mailbox fifo until the remote processor is booted.
+        */
+       ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+       if (ret < 0) {
+               dev_err(dev, "mbox_send_message failed: %d\n", ret);
+               mbox_free_channel(kproc->mbox);
+               return ret;
+       }
+
+       return 0;
+}
 /*
  * The C66x DSP cores have a local reset that affects only the CPU, and a
  * generic module reset that powers on the device and allows the DSP internal
@@ -223,7 +260,8 @@ lreset:
  * used to release the global reset on C66x DSPs to allow loading into the DSP
  * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
  * firmware loading, and is followed by the .start() ops after loading to
- * actually let the C66x DSP cores run.
+ * actually let the C66x DSP cores run. This callback is invoked only in
+ * remoteproc mode.
  */
 static int k3_dsp_rproc_prepare(struct rproc *rproc)
 {
@@ -247,7 +285,7 @@ static int k3_dsp_rproc_prepare(struct rproc *rproc)
  * powering down the C66x DSP cores. The cores themselves are only halted in the
  * .stop() callback through the local reset, and the .unprepare() ops is invoked
  * by the remoteproc core after the remoteproc is stopped to balance the global
- * reset.
+ * reset. This callback is invoked only in remoteproc mode.
  */
 static int k3_dsp_rproc_unprepare(struct rproc *rproc)
 {
@@ -268,42 +306,18 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
  *
  * This function will be invoked only after the firmware for this rproc
  * was loaded, parsed successfully, and all of its resource requirements
- * were met.
+ * were met. This callback is invoked only in remoteproc mode.
  */
 static int k3_dsp_rproc_start(struct rproc *rproc)
 {
        struct k3_dsp_rproc *kproc = rproc->priv;
-       struct mbox_client *client = &kproc->client;
        struct device *dev = kproc->dev;
        u32 boot_addr;
        int ret;
 
-       client->dev = dev;
-       client->tx_done = NULL;
-       client->rx_callback = k3_dsp_rproc_mbox_callback;
-       client->tx_block = false;
-       client->knows_txdone = false;
-
-       kproc->mbox = mbox_request_channel(client, 0);
-       if (IS_ERR(kproc->mbox)) {
-               ret = -EBUSY;
-               dev_err(dev, "mbox_request_channel failed: %ld\n",
-                       PTR_ERR(kproc->mbox));
+       ret = k3_dsp_rproc_request_mbox(rproc);
+       if (ret)
                return ret;
-       }
-
-       /*
-        * Ping the remote processor, this is only for sanity-sake for now;
-        * there is no functional effect whatsoever.
-        *
-        * Note that the reply will _not_ arrive immediately: this message
-        * will wait in the mailbox fifo until the remote processor is booted.
-        */
-       ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
-       if (ret < 0) {
-               dev_err(dev, "mbox_send_message failed: %d\n", ret);
-               goto put_mbox;
-       }
 
        boot_addr = rproc->bootaddr;
        if (boot_addr & (kproc->data->boot_align_addr - 1)) {
@@ -333,7 +347,7 @@ put_mbox:
  * Stop the DSP remote processor.
  *
  * This function puts the DSP processor into reset, and finishes processing
- * of any pending messages.
+ * of any pending messages. This callback is invoked only in remoteproc mode.
  */
 static int k3_dsp_rproc_stop(struct rproc *rproc)
 {
@@ -346,6 +360,78 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
        return 0;
 }
 
+/*
+ * Attach to a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the DSP core. This callback is invoked only in IPC-only
+ * mode.
+ */
+static int k3_dsp_rproc_attach(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       ret = k3_dsp_rproc_request_mbox(rproc);
+       if (ret)
+               return ret;
+
+       dev_info(dev, "DSP initialized in IPC-only mode\n");
+       return 0;
+}
+
+/*
+ * Detach from a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the DSP core is not stopped and will
+ * be left to continue to run its booted firmware. This callback is invoked only
+ * in IPC-only mode.
+ */
+static int k3_dsp_rproc_detach(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+
+       mbox_free_channel(kproc->mbox);
+       dev_info(dev, "DSP deinitialized in IPC-only mode\n");
+       return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
+                                                         size_t *rsc_table_sz)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+
+       if (!kproc->rmem[0].cpu_addr) {
+               dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /*
+        * NOTE: The resource table size is currently hard-coded to a maximum
+        * of 256 bytes. The most common resource table usage for K3 firmwares
+        * is to only have the vdev resource entry and an optional trace entry.
+        * The exact size could be computed based on resource table address, but
+        * the hard-coded value suffices to support the IPC-only mode.
+        */
+       *rsc_table_sz = 256;
+       return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
 /*
  * Custom function to translate a DSP device address (internal RAMs only) to a
  * kernel virtual address.  The DSPs can access their RAMs at either an internal
@@ -592,6 +678,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
        struct k3_dsp_rproc *kproc;
        struct rproc *rproc;
        const char *fw_name;
+       bool p_state = false;
        int ret = 0;
        int ret1;
 
@@ -670,19 +757,43 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
                goto release_tsp;
        }
 
-       /*
-        * ensure the DSP local reset is asserted to ensure the DSP doesn't
-        * execute bogus code in .prepare() when the module reset is released.
-        */
-       if (data->uses_lreset) {
-               ret = reset_control_status(kproc->reset);
-               if (ret < 0) {
-                       dev_err(dev, "failed to get reset status, status = %d\n",
-                               ret);
-                       goto release_mem;
-               } else if (ret == 0) {
-                       dev_warn(dev, "local reset is deasserted for device\n");
-                       k3_dsp_rproc_reset(kproc);
+       ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+                                              NULL, &p_state);
+       if (ret) {
+               dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+                       ret);
+               goto release_mem;
+       }
+
+       /* configure J721E devices for either remoteproc or IPC-only mode */
+       if (p_state) {
+               dev_info(dev, "configured DSP for IPC-only mode\n");
+               rproc->state = RPROC_DETACHED;
+               /* override rproc ops with only required IPC-only mode ops */
+               rproc->ops->prepare = NULL;
+               rproc->ops->unprepare = NULL;
+               rproc->ops->start = NULL;
+               rproc->ops->stop = NULL;
+               rproc->ops->attach = k3_dsp_rproc_attach;
+               rproc->ops->detach = k3_dsp_rproc_detach;
+               rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
+       } else {
+               dev_info(dev, "configured DSP for remoteproc mode\n");
+               /*
+                * ensure the DSP local reset is asserted to ensure the DSP
+                * doesn't execute bogus code in .prepare() when the module
+                * reset is released.
+                */
+               if (data->uses_lreset) {
+                       ret = reset_control_status(kproc->reset);
+                       if (ret < 0) {
+                               dev_err(dev, "failed to get reset status, status = %d\n",
+                                       ret);
+                               goto release_mem;
+                       } else if (ret == 0) {
+                               dev_warn(dev, "local reset is deasserted for device\n");
+                               k3_dsp_rproc_reset(kproc);
+                       }
                }
        }
 
@@ -717,9 +828,18 @@ free_rproc:
 static int k3_dsp_rproc_remove(struct platform_device *pdev)
 {
        struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+       struct rproc *rproc = kproc->rproc;
        struct device *dev = &pdev->dev;
        int ret;
 
+       if (rproc->state == RPROC_ATTACHED) {
+               ret = rproc_detach(rproc);
+               if (ret) {
+                       dev_err(dev, "failed to detach proc, ret = %d\n", ret);
+                       return ret;
+               }
+       }
+
        rproc_del(kproc->rproc);
 
        ret = ti_sci_proc_release(kproc->tsp);
index 969531c..4840ad9 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * TI K3 R5F (MCU) Remote Processor driver
  *
- * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
  *     Suman Anna <s-anna@ti.com>
  */
 
@@ -376,6 +376,44 @@ static inline int k3_r5_core_run(struct k3_r5_core *core)
                                       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
 }
 
+static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+{
+       struct k3_r5_rproc *kproc = rproc->priv;
+       struct mbox_client *client = &kproc->client;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       client->dev = dev;
+       client->tx_done = NULL;
+       client->rx_callback = k3_r5_rproc_mbox_callback;
+       client->tx_block = false;
+       client->knows_txdone = false;
+
+       kproc->mbox = mbox_request_channel(client, 0);
+       if (IS_ERR(kproc->mbox)) {
+               ret = -EBUSY;
+               dev_err(dev, "mbox_request_channel failed: %ld\n",
+                       PTR_ERR(kproc->mbox));
+               return ret;
+       }
+
+       /*
+        * Ping the remote processor, this is only for sanity-sake for now;
+        * there is no functional effect whatsoever.
+        *
+        * Note that the reply will _not_ arrive immediately: this message
+        * will wait in the mailbox fifo until the remote processor is booted.
+        */
+       ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+       if (ret < 0) {
+               dev_err(dev, "mbox_send_message failed: %d\n", ret);
+               mbox_free_channel(kproc->mbox);
+               return ret;
+       }
+
+       return 0;
+}
+
 /*
  * The R5F cores have controls for both a reset and a halt/run. The code
  * execution from DDR requires the initial boot-strapping code to be run
@@ -390,6 +428,7 @@ static inline int k3_r5_core_run(struct k3_r5_core *core)
  * private to each core. Only Core0 needs to be unhalted for running the
  * cluster in this mode. The function uses the same reset logic as LockStep
  * mode for this (though the behavior is agnostic of the reset release order).
+ * This callback is invoked only in remoteproc mode.
  */
 static int k3_r5_rproc_prepare(struct rproc *rproc)
 {
@@ -455,7 +494,8 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
  * both cores. The access is made possible only with releasing the resets for
  * both cores, but with only Core0 unhalted. This function re-uses the same
  * reset assert logic as LockStep mode for this mode (though the behavior is
- * agnostic of the reset assert order).
+ * agnostic of the reset assert order). This callback is invoked only in
+ * remoteproc mode.
  */
 static int k3_r5_rproc_unprepare(struct rproc *rproc)
 {
@@ -489,44 +529,21 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
  *
  * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
  * code, so only Core0 needs to be unhalted. The function uses the same logic
- * flow as Split-mode for this.
+ * flow as Split-mode for this. This callback is invoked only in remoteproc
+ * mode.
  */
 static int k3_r5_rproc_start(struct rproc *rproc)
 {
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
-       struct mbox_client *client = &kproc->client;
        struct device *dev = kproc->dev;
        struct k3_r5_core *core;
        u32 boot_addr;
        int ret;
 
-       client->dev = dev;
-       client->tx_done = NULL;
-       client->rx_callback = k3_r5_rproc_mbox_callback;
-       client->tx_block = false;
-       client->knows_txdone = false;
-
-       kproc->mbox = mbox_request_channel(client, 0);
-       if (IS_ERR(kproc->mbox)) {
-               ret = -EBUSY;
-               dev_err(dev, "mbox_request_channel failed: %ld\n",
-                       PTR_ERR(kproc->mbox));
+       ret = k3_r5_rproc_request_mbox(rproc);
+       if (ret)
                return ret;
-       }
-
-       /*
-        * Ping the remote processor, this is only for sanity-sake for now;
-        * there is no functional effect whatsoever.
-        *
-        * Note that the reply will _not_ arrive immediately: this message
-        * will wait in the mailbox fifo until the remote processor is booted.
-        */
-       ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
-       if (ret < 0) {
-               dev_err(dev, "mbox_send_message failed: %d\n", ret);
-               goto put_mbox;
-       }
 
        boot_addr = rproc->bootaddr;
        /* TODO: add boot_addr sanity checking */
@@ -584,7 +601,8 @@ put_mbox:
  * be done here, but is preferred to be done in the .unprepare() ops - this
  * maintains the symmetric behavior between the .start(), .stop(), .prepare()
  * and .unprepare() ops, and also balances them well between sysfs 'state'
- * flow and device bind/unbind or module removal.
+ * flow and device bind/unbind or module removal. This callback is invoked
+ * only in remoteproc mode.
  */
 static int k3_r5_rproc_stop(struct rproc *rproc)
 {
@@ -621,6 +639,78 @@ out:
        return ret;
 }
 
+/*
+ * Attach to a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the R5F cores in IPC-only mode. This callback is invoked
+ * only in IPC-only mode.
+ */
+static int k3_r5_rproc_attach(struct rproc *rproc)
+{
+       struct k3_r5_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       ret = k3_r5_rproc_request_mbox(rproc);
+       if (ret)
+               return ret;
+
+       dev_info(dev, "R5F core initialized in IPC-only mode\n");
+       return 0;
+}
+
+/*
+ * Detach from a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the R5F cores are not stopped and
+ * will be left in booted state in IPC-only mode. This callback is invoked
+ * only in IPC-only mode.
+ */
+static int k3_r5_rproc_detach(struct rproc *rproc)
+{
+       struct k3_r5_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+
+       mbox_free_channel(kproc->mbox);
+       dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
+       return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
+                                                        size_t *rsc_table_sz)
+{
+       struct k3_r5_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+
+       if (!kproc->rmem[0].cpu_addr) {
+               dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /*
+        * NOTE: The resource table size is currently hard-coded to a maximum
+        * of 256 bytes. The most common resource table usage for K3 firmwares
+        * is to only have the vdev resource entry and an optional trace entry.
+        * The exact size could be computed based on resource table address, but
+        * the hard-coded value suffices to support the IPC-only mode.
+        */
+       *rsc_table_sz = 256;
+       return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
 /*
  * Internal Memory translation helper
  *
@@ -1000,6 +1090,116 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
        }
 }
 
+/*
+ * This function checks and configures a R5F core for IPC-only or remoteproc
+ * mode. The driver is configured to be in IPC-only mode for a R5F core when
+ * the core has been loaded and started by a bootloader. The IPC-only mode is
+ * detected by querying the System Firmware for reset, power on and halt status
+ * and ensuring that the core is running. Any incomplete steps at bootloader
+ * are validated and errored out.
+ *
+ * In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings
+ * and cluster mode parsed originally from kernel DT are updated to reflect the
+ * actual values configured by bootloader. The driver internal device memory
+ * addresses for TCMs are also updated.
+ */
+static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+{
+       struct k3_r5_cluster *cluster = kproc->cluster;
+       struct k3_r5_core *core = kproc->core;
+       struct device *cdev = core->dev;
+       bool r_state = false, c_state = false;
+       u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
+       u64 boot_vec = 0;
+       u32 atcm_enable, btcm_enable, loczrama;
+       struct k3_r5_core *core0;
+       enum cluster_mode mode;
+       int ret;
+
+       core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+
+       ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
+                                             &r_state, &c_state);
+       if (ret) {
+               dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+                       ret);
+               return ret;
+       }
+       if (r_state != c_state) {
+               dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
+                        r_state, c_state);
+       }
+
+       ret = reset_control_status(core->reset);
+       if (ret < 0) {
+               dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
+                       ret);
+               return ret;
+       }
+
+       ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+                                    &stat);
+       if (ret < 0) {
+               dev_err(cdev, "failed to get initial processor status, ret = %d\n",
+                       ret);
+               return ret;
+       }
+       atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ?  1 : 0;
+       btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ?  1 : 0;
+       loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ?  1 : 0;
+       if (cluster->soc_data->single_cpu_mode) {
+               mode = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ?
+                               CLUSTER_MODE_SINGLECPU : CLUSTER_MODE_SPLIT;
+       } else {
+               mode = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ?
+                               CLUSTER_MODE_LOCKSTEP : CLUSTER_MODE_SPLIT;
+       }
+       halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT;
+
+       /*
+        * IPC-only mode detection requires both local and module resets to
+        * be deasserted and R5F core to be unhalted. Local reset status is
+        * irrelevant if module reset is asserted (POR value has local reset
+        * deasserted), and is deemed as remoteproc mode
+        */
+       if (c_state && !ret && !halted) {
+               dev_info(cdev, "configured R5F for IPC-only mode\n");
+               kproc->rproc->state = RPROC_DETACHED;
+               ret = 1;
+               /* override rproc ops with only required IPC-only mode ops */
+               kproc->rproc->ops->prepare = NULL;
+               kproc->rproc->ops->unprepare = NULL;
+               kproc->rproc->ops->start = NULL;
+               kproc->rproc->ops->stop = NULL;
+               kproc->rproc->ops->attach = k3_r5_rproc_attach;
+               kproc->rproc->ops->detach = k3_r5_rproc_detach;
+               kproc->rproc->ops->get_loaded_rsc_table =
+                                               k3_r5_get_loaded_rsc_table;
+       } else if (!c_state) {
+               dev_info(cdev, "configured R5F for remoteproc mode\n");
+               ret = 0;
+       } else {
+               dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
+                       !ret ? "deasserted" : "asserted",
+                       c_state ? "deasserted" : "asserted",
+                       halted ? "halted" : "unhalted");
+               ret = -EINVAL;
+       }
+
+       /* fixup TCMs, cluster & core flags to actual values in IPC-only mode */
+       if (ret > 0) {
+               if (core == core0)
+                       cluster->mode = mode;
+               core->atcm_enable = atcm_enable;
+               core->btcm_enable = btcm_enable;
+               core->loczrama = loczrama;
+               core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+               core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+       }
+
+       return ret;
+}
+
 static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
 {
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
@@ -1009,7 +1209,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
        struct device *cdev;
        const char *fw_name;
        struct rproc *rproc;
-       int ret;
+       int ret, ret1;
 
        core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
        list_for_each_entry(core, &cluster->cores, elem) {
@@ -1040,6 +1240,12 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
                kproc->rproc = rproc;
                core->rproc = rproc;
 
+               ret = k3_r5_rproc_configure_mode(kproc);
+               if (ret < 0)
+                       goto err_config;
+               if (ret)
+                       goto init_rmem;
+
                ret = k3_r5_rproc_configure(kproc);
                if (ret) {
                        dev_err(dev, "initial configure failed, ret = %d\n",
@@ -1047,6 +1253,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
                        goto err_config;
                }
 
+init_rmem:
                k3_r5_adjust_tcm_sizes(kproc);
 
                ret = k3_r5_reserved_mem_init(kproc);
@@ -1071,6 +1278,15 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
        return 0;
 
 err_split:
+       if (rproc->state == RPROC_ATTACHED) {
+               ret1 = rproc_detach(rproc);
+               if (ret1) {
+                       dev_err(kproc->dev, "failed to detach rproc, ret = %d\n",
+                               ret1);
+                       return ret1;
+               }
+       }
+
        rproc_del(rproc);
 err_add:
        k3_r5_reserved_mem_exit(kproc);
@@ -1094,6 +1310,7 @@ static void k3_r5_cluster_rproc_exit(void *data)
        struct k3_r5_rproc *kproc;
        struct k3_r5_core *core;
        struct rproc *rproc;
+       int ret;
 
        /*
         * lockstep mode and single-cpu modes have only one rproc associated
@@ -1109,6 +1326,14 @@ static void k3_r5_cluster_rproc_exit(void *data)
                rproc = core->rproc;
                kproc = rproc->priv;
 
+               if (rproc->state == RPROC_ATTACHED) {
+                       ret = rproc_detach(rproc);
+                       if (ret) {
+                               dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret);
+                               return;
+                       }
+               }
+
                rproc_del(rproc);
 
                k3_r5_reserved_mem_exit(kproc);
index 484f760..a0c204c 100644 (file)
@@ -163,6 +163,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
        }
 
        rproc->auto_boot = false;
+       rproc->sysfs_read_only = true;
 
        wkupm3 = rproc->priv;
        wkupm3->rproc = rproc;
index 0b4407a..d379586 100644 (file)
@@ -15,6 +15,14 @@ config RPMSG_CHAR
          in /dev. They make it possible for user-space programs to send and
          receive rpmsg packets.
 
+config RPMSG_CTRL
+       tristate "RPMSG control interface"
+       depends on RPMSG && ( RPMSG_CHAR || RPMSG_CHAR=n )
+       help
+         Say Y here to enable the support of the /dev/rpmsg_ctrlX API. This API
+         allows user-space programs to create endpoints with specific service name,
+         source and destination addresses.
+
 config RPMSG_NS
        tristate "RPMSG name service announcement"
        depends on RPMSG
index 8d45265..58e3b38 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_RPMSG)            += rpmsg_core.o
 obj-$(CONFIG_RPMSG_CHAR)       += rpmsg_char.o
+obj-$(CONFIG_RPMSG_CTRL)       += rpmsg_ctrl.o
 obj-$(CONFIG_RPMSG_NS)         += rpmsg_ns.o
 obj-$(CONFIG_RPMSG_MTK_SCP)    += mtk_rpmsg.o
 qcom_glink-objs                        := qcom_glink_native.o qcom_glink_ssr.o
index 1030cfa..0758651 100644 (file)
@@ -792,7 +792,7 @@ static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
                return -ENXIO;
        }
 
-       dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC);
+       dcmd = kzalloc(struct_size(dcmd, data, extra), GFP_ATOMIC);
        if (!dcmd)
                return -ENOMEM;
 
@@ -1715,7 +1715,7 @@ static int qcom_glink_create_chrdev(struct qcom_glink *glink)
        rpdev->dev.parent = glink->dev;
        rpdev->dev.release = qcom_glink_device_release;
 
-       return rpmsg_chrdev_register_device(rpdev);
+       return rpmsg_ctrldev_register_device(rpdev);
 }
 
 struct qcom_glink *qcom_glink_native_probe(struct device *dev,
index 540e027..764c980 100644 (file)
@@ -1113,7 +1113,7 @@ static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge)
        qsdev->rpdev.dev.parent = &edge->dev;
        qsdev->rpdev.dev.release = qcom_smd_release_device;
 
-       return rpmsg_chrdev_register_device(&qsdev->rpdev);
+       return rpmsg_ctrldev_register_device(&qsdev->rpdev);
 }
 
 /*
@@ -1288,9 +1288,14 @@ static void qcom_channel_state_worker(struct work_struct *work)
                if (channel->state != SMD_CHANNEL_CLOSED)
                        continue;
 
+               /*
+                * Always open rpm_requests, even when already opened which is
+                * required on some SoCs like msm8953.
+                */
                remote_state = GET_RX_CHANNEL_INFO(channel, state);
                if (remote_state != SMD_CHANNEL_OPENING &&
-                   remote_state != SMD_CHANNEL_OPENED)
+                   remote_state != SMD_CHANNEL_OPENED &&
+                   strcmp(channel->name, "rpm_requests"))
                        continue;
 
                if (channel->registered)
@@ -1298,9 +1303,7 @@ static void qcom_channel_state_worker(struct work_struct *work)
 
                spin_unlock_irqrestore(&edge->channels_lock, flags);
                qcom_smd_create_device(channel);
-               channel->registered = true;
                spin_lock_irqsave(&edge->channels_lock, flags);
-
                channel->registered = true;
        }
 
@@ -1605,7 +1608,7 @@ static int __init qcom_smd_init(void)
 {
        return platform_driver_register(&qcom_smd_driver);
 }
-subsys_initcall(qcom_smd_init);
+arch_initcall(qcom_smd_init);
 
 static void __exit qcom_smd_exit(void)
 {
index 5663cf7..b6183d4 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
+ * Copyright (C) 2022, STMicroelectronics
  * Copyright (c) 2016, Linaro Ltd.
  * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
  * Copyright (c) 2012, PetaLogix
 #include <linux/uaccess.h>
 #include <uapi/linux/rpmsg.h>
 
+#include "rpmsg_char.h"
+#include "rpmsg_internal.h"
+
 #define RPMSG_DEV_MAX  (MINORMASK + 1)
 
 static dev_t rpmsg_major;
-static struct class *rpmsg_class;
 
-static DEFINE_IDA(rpmsg_ctrl_ida);
 static DEFINE_IDA(rpmsg_ept_ida);
 static DEFINE_IDA(rpmsg_minor_ida);
 
 #define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
 #define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)
 
-#define dev_to_ctrldev(dev) container_of(dev, struct rpmsg_ctrldev, dev)
-#define cdev_to_ctrldev(i_cdev) container_of(i_cdev, struct rpmsg_ctrldev, cdev)
-
-/**
- * struct rpmsg_ctrldev - control device for instantiating endpoint devices
- * @rpdev:     underlaying rpmsg device
- * @cdev:      cdev for the ctrl device
- * @dev:       device for the ctrl device
- */
-struct rpmsg_ctrldev {
-       struct rpmsg_device *rpdev;
-       struct cdev cdev;
-       struct device dev;
-};
-
 /**
  * struct rpmsg_eptdev - endpoint device context
  * @dev:       endpoint device
@@ -63,6 +50,8 @@ struct rpmsg_ctrldev {
  * @queue_lock:        synchronization of @queue operations
  * @queue:     incoming message queue
  * @readq:     wait object for incoming queue
+ * @default_ept: set to channel default endpoint if the default endpoint should be re-used
+ *              on device open to prevent endpoint address update.
  */
 struct rpmsg_eptdev {
        struct device dev;
@@ -73,13 +62,15 @@ struct rpmsg_eptdev {
 
        struct mutex ept_lock;
        struct rpmsg_endpoint *ept;
+       struct rpmsg_endpoint *default_ept;
 
        spinlock_t queue_lock;
        struct sk_buff_head queue;
        wait_queue_head_t readq;
+
 };
 
-static int rpmsg_eptdev_destroy(struct device *dev, void *data)
+int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
 {
        struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);
 
@@ -98,6 +89,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
 
        return 0;
 }
+EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);
 
 static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
                        void *priv, u32 addr)
@@ -133,7 +125,15 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
 
        get_device(dev);
 
-       ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
+       /*
+        * If the default_ept is set, the rpmsg device default endpoint is used.
+        * Else a new endpoint is created on open that will be destroyed on release.
+        */
+       if (eptdev->default_ept)
+               ept = eptdev->default_ept;
+       else
+               ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
+
        if (!ept) {
                dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
                put_device(dev);
@@ -154,7 +154,8 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
        /* Close the endpoint, if it's not already destroyed by the parent */
        mutex_lock(&eptdev->ept_lock);
        if (eptdev->ept) {
-               rpmsg_destroy_ept(eptdev->ept);
+               if (!eptdev->default_ept)
+                       rpmsg_destroy_ept(eptdev->ept);
                eptdev->ept = NULL;
        }
        mutex_unlock(&eptdev->ept_lock);
@@ -242,10 +243,13 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
                goto unlock_eptdev;
        }
 
-       if (filp->f_flags & O_NONBLOCK)
+       if (filp->f_flags & O_NONBLOCK) {
                ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
-       else
+               if (ret == -ENOMEM)
+                       ret = -EAGAIN;
+       } else {
                ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
+       }
 
 unlock_eptdev:
        mutex_unlock(&eptdev->ept_lock);
@@ -281,7 +285,11 @@ static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
        if (cmd != RPMSG_DESTROY_EPT_IOCTL)
                return -EINVAL;
 
-       return rpmsg_eptdev_destroy(&eptdev->dev, NULL);
+       /* Don't allow to destroy a default endpoint. */
+       if (eptdev->default_ept)
+               return -EINVAL;
+
+       return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
 }
 
 static const struct file_operations rpmsg_eptdev_fops = {
@@ -339,21 +347,18 @@ static void rpmsg_eptdev_release_device(struct device *dev)
        kfree(eptdev);
 }
 
-static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
-                              struct rpmsg_channel_info chinfo)
+static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
+                                                     struct device *parent)
 {
-       struct rpmsg_device *rpdev = ctrldev->rpdev;
        struct rpmsg_eptdev *eptdev;
        struct device *dev;
-       int ret;
 
        eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
        if (!eptdev)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        dev = &eptdev->dev;
        eptdev->rpdev = rpdev;
-       eptdev->chinfo = chinfo;
 
        mutex_init(&eptdev->ept_lock);
        spin_lock_init(&eptdev->queue_lock);
@@ -362,13 +367,23 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
 
        device_initialize(dev);
        dev->class = rpmsg_class;
-       dev->parent = &ctrldev->dev;
+       dev->parent = parent;
        dev->groups = rpmsg_eptdev_groups;
        dev_set_drvdata(dev, eptdev);
 
        cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
        eptdev->cdev.owner = THIS_MODULE;
 
+       return eptdev;
+}
+
+static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
+{
+       struct device *dev = &eptdev->dev;
+       int ret;
+
+       eptdev->chinfo = chinfo;
+
        ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
        if (ret < 0)
                goto free_eptdev;
@@ -400,163 +415,91 @@ free_eptdev:
        return ret;
 }
 
-static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp)
-{
-       struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
-
-       get_device(&ctrldev->dev);
-       filp->private_data = ctrldev;
-
-       return 0;
-}
-
-static int rpmsg_ctrldev_release(struct inode *inode, struct file *filp)
-{
-       struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
-
-       put_device(&ctrldev->dev);
-
-       return 0;
-}
-
-static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd,
-                               unsigned long arg)
+int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
+                              struct rpmsg_channel_info chinfo)
 {
-       struct rpmsg_ctrldev *ctrldev = fp->private_data;
-       void __user *argp = (void __user *)arg;
-       struct rpmsg_endpoint_info eptinfo;
-       struct rpmsg_channel_info chinfo;
-
-       if (cmd != RPMSG_CREATE_EPT_IOCTL)
-               return -EINVAL;
-
-       if (copy_from_user(&eptinfo, argp, sizeof(eptinfo)))
-               return -EFAULT;
-
-       memcpy(chinfo.name, eptinfo.name, RPMSG_NAME_SIZE);
-       chinfo.name[RPMSG_NAME_SIZE-1] = '\0';
-       chinfo.src = eptinfo.src;
-       chinfo.dst = eptinfo.dst;
-
-       return rpmsg_eptdev_create(ctrldev, chinfo);
-};
+       struct rpmsg_eptdev *eptdev;
+       int ret;
 
-static const struct file_operations rpmsg_ctrldev_fops = {
-       .owner = THIS_MODULE,
-       .open = rpmsg_ctrldev_open,
-       .release = rpmsg_ctrldev_release,
-       .unlocked_ioctl = rpmsg_ctrldev_ioctl,
-       .compat_ioctl = compat_ptr_ioctl,
-};
+       eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
+       if (IS_ERR(eptdev))
+               return PTR_ERR(eptdev);
 
-static void rpmsg_ctrldev_release_device(struct device *dev)
-{
-       struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev);
+       ret = rpmsg_chrdev_eptdev_add(eptdev, chinfo);
 
-       ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
-       ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
-       kfree(ctrldev);
+       return ret;
 }
+EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
 
 static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
 {
-       struct rpmsg_ctrldev *ctrldev;
-       struct device *dev;
-       int ret;
-
-       ctrldev = kzalloc(sizeof(*ctrldev), GFP_KERNEL);
-       if (!ctrldev)
-               return -ENOMEM;
-
-       ctrldev->rpdev = rpdev;
-
-       dev = &ctrldev->dev;
-       device_initialize(dev);
-       dev->parent = &rpdev->dev;
-       dev->class = rpmsg_class;
-
-       cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops);
-       ctrldev->cdev.owner = THIS_MODULE;
-
-       ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
-       if (ret < 0)
-               goto free_ctrldev;
-       dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
-
-       ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL);
-       if (ret < 0)
-               goto free_minor_ida;
-       dev->id = ret;
-       dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
+       struct rpmsg_channel_info chinfo;
+       struct rpmsg_eptdev *eptdev;
+       struct device *dev = &rpdev->dev;
 
-       ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
-       if (ret)
-               goto free_ctrl_ida;
+       memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
+       chinfo.src = rpdev->src;
+       chinfo.dst = rpdev->dst;
 
-       /* We can now rely on the release function for cleanup */
-       dev->release = rpmsg_ctrldev_release_device;
+       eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
+       if (IS_ERR(eptdev))
+               return PTR_ERR(eptdev);
 
-       dev_set_drvdata(&rpdev->dev, ctrldev);
+       /* Set the default_ept to the rpmsg device endpoint */
+       eptdev->default_ept = rpdev->ept;
 
-       return ret;
+       /*
+        * The rpmsg_ept_cb uses *priv parameter to get its rpmsg_eptdev context.
+        * Storedit in default_ept *priv field.
+        */
+       eptdev->default_ept->priv = eptdev;
 
-free_ctrl_ida:
-       ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
-free_minor_ida:
-       ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
-free_ctrldev:
-       put_device(dev);
-       kfree(ctrldev);
-
-       return ret;
+       return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
 }
 
 static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
 {
-       struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev);
        int ret;
 
-       /* Destroy all endpoints */
-       ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_eptdev_destroy);
+       ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
        if (ret)
-               dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
-
-       cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
-       put_device(&ctrldev->dev);
+               dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
 }
 
+static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
+       { .name = "rpmsg-raw" },
+       { },
+};
+
 static struct rpmsg_driver rpmsg_chrdev_driver = {
        .probe = rpmsg_chrdev_probe,
        .remove = rpmsg_chrdev_remove,
-       .drv = {
-               .name = "rpmsg_chrdev",
-       },
+       .callback = rpmsg_ept_cb,
+       .id_table = rpmsg_chrdev_id_table,
+       .drv.name = "rpmsg_chrdev",
 };
 
 static int rpmsg_chrdev_init(void)
 {
        int ret;
 
-       ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg");
+       ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
        if (ret < 0) {
                pr_err("failed to allocate char dev region\n");
                return ret;
        }
 
-       rpmsg_class = class_create(THIS_MODULE, "rpmsg");
-       if (IS_ERR(rpmsg_class)) {
-               pr_err("failed to create rpmsg class\n");
-               unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
-               return PTR_ERR(rpmsg_class);
-       }
-
        ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
        if (ret < 0) {
-               pr_err("failed to register rpmsg driver\n");
-               class_destroy(rpmsg_class);
-               unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
+               pr_err("rpmsg: failed to register rpmsg raw driver\n");
+               goto free_region;
        }
 
+       return 0;
+
+free_region:
+       unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
+
        return ret;
 }
 postcore_initcall(rpmsg_chrdev_init);
@@ -564,7 +507,6 @@ postcore_initcall(rpmsg_chrdev_init);
 static void rpmsg_chrdev_exit(void)
 {
        unregister_rpmsg_driver(&rpmsg_chrdev_driver);
-       class_destroy(rpmsg_class);
        unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
 }
 module_exit(rpmsg_chrdev_exit);
diff --git a/drivers/rpmsg/rpmsg_char.h b/drivers/rpmsg/rpmsg_char.h
new file mode 100644 (file)
index 0000000..117d9cb
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022, STMicroelectronics
+ */
+
+#ifndef __RPMSG_CHRDEV_H__
+#define __RPMSG_CHRDEV_H__
+
+#if IS_ENABLED(CONFIG_RPMSG_CHAR)
+/**
+ * rpmsg_chrdev_eptdev_create() - register char device based on an endpoint
+ * @rpdev:  prepared rpdev to be used for creating endpoints
+ * @parent: parent device
+ * @chinfo: associated endpoint channel information.
+ *
+ * This function create a new rpmsg char endpoint device to instantiate a new
+ * endpoint based on chinfo information.
+ */
+int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
+                              struct rpmsg_channel_info chinfo);
+
+/**
+ * rpmsg_chrdev_eptdev_destroy() - destroy created char device endpoint.
+ * @data: private data associated to the endpoint device
+ *
+ * This function destroys a rpmsg char endpoint device created by the RPMSG_DESTROY_EPT_IOCTL
+ * control.
+ */
+int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data);
+
+#else  /*IS_ENABLED(CONFIG_RPMSG_CHAR) */
+
+static inline int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
+                                            struct rpmsg_channel_info chinfo)
+{
+       return -ENXIO;
+}
+
+static inline int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
+{
+       return -ENXIO;
+}
+
+#endif /*IS_ENABLED(CONFIG_RPMSG_CHAR) */
+
+#endif /*__RPMSG_CHRDEV_H__ */
index d9e612f..79368a9 100644 (file)
@@ -20,6 +20,9 @@
 
 #include "rpmsg_internal.h"
 
+struct class *rpmsg_class;
+EXPORT_SYMBOL(rpmsg_class);
+
 /**
  * rpmsg_create_channel() - create a new rpmsg channel
  * using its name and address info.
@@ -662,10 +665,17 @@ static int __init rpmsg_init(void)
 {
        int ret;
 
+       rpmsg_class = class_create(THIS_MODULE, "rpmsg");
+       if (IS_ERR(rpmsg_class)) {
+               pr_err("failed to create rpmsg class\n");
+               return PTR_ERR(rpmsg_class);
+       }
+
        ret = bus_register(&rpmsg_bus);
-       if (ret)
+       if (ret) {
                pr_err("failed to register rpmsg bus: %d\n", ret);
-
+               class_destroy(rpmsg_class);
+       }
        return ret;
 }
 postcore_initcall(rpmsg_init);
@@ -673,6 +683,7 @@ postcore_initcall(rpmsg_init);
 static void __exit rpmsg_fini(void)
 {
        bus_unregister(&rpmsg_bus);
+       class_destroy(rpmsg_class);
 }
 module_exit(rpmsg_fini);
 
diff --git a/drivers/rpmsg/rpmsg_ctrl.c b/drivers/rpmsg/rpmsg_ctrl.c
new file mode 100644 (file)
index 0000000..107da70
--- /dev/null
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022, STMicroelectronics
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2012, PetaLogix
+ * Copyright (c) 2011, Texas Instruments, Inc.
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * Based on rpmsg performance statistics driver by Michal Simek, which in turn
+ * was based on TI & Google OMX rpmsg driver.
+ */
+
+#define pr_fmt(fmt)            KBUILD_MODNAME ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/rpmsg.h>
+
+#include "rpmsg_char.h"
+#include "rpmsg_internal.h"
+
+#define RPMSG_DEV_MAX  (MINORMASK + 1)
+
+static dev_t rpmsg_major;
+
+static DEFINE_IDA(rpmsg_ctrl_ida);
+static DEFINE_IDA(rpmsg_minor_ida);
+
+#define dev_to_ctrldev(dev) container_of(dev, struct rpmsg_ctrldev, dev)
+#define cdev_to_ctrldev(i_cdev) container_of(i_cdev, struct rpmsg_ctrldev, cdev)
+
+/**
+ * struct rpmsg_ctrldev - control device for instantiating endpoint devices
+ * @rpdev:     underlaying rpmsg device
+ * @cdev:      cdev for the ctrl device
+ * @dev:       device for the ctrl device
+ * @ctrl_lock: serialize the ioctrls.
+ */
+struct rpmsg_ctrldev {
+       struct rpmsg_device *rpdev;
+       struct cdev cdev;
+       struct device dev;
+       struct mutex ctrl_lock;
+};
+
+static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp)
+{
+       struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
+
+       get_device(&ctrldev->dev);
+       filp->private_data = ctrldev;
+
+       return 0;
+}
+
+static int rpmsg_ctrldev_release(struct inode *inode, struct file *filp)
+{
+       struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);
+
+       put_device(&ctrldev->dev);
+
+       return 0;
+}
+
+static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd,
+                               unsigned long arg)
+{
+       struct rpmsg_ctrldev *ctrldev = fp->private_data;
+       void __user *argp = (void __user *)arg;
+       struct rpmsg_endpoint_info eptinfo;
+       struct rpmsg_channel_info chinfo;
+       struct rpmsg_device *rpdev;
+       int ret = 0;
+
+       if (copy_from_user(&eptinfo, argp, sizeof(eptinfo)))
+               return -EFAULT;
+
+       memcpy(chinfo.name, eptinfo.name, RPMSG_NAME_SIZE);
+       chinfo.name[RPMSG_NAME_SIZE - 1] = '\0';
+       chinfo.src = eptinfo.src;
+       chinfo.dst = eptinfo.dst;
+
+       mutex_lock(&ctrldev->ctrl_lock);
+       switch (cmd) {
+       case RPMSG_CREATE_EPT_IOCTL:
+               ret = rpmsg_chrdev_eptdev_create(ctrldev->rpdev, &ctrldev->dev, chinfo);
+               break;
+
+       case RPMSG_CREATE_DEV_IOCTL:
+               rpdev = rpmsg_create_channel(ctrldev->rpdev, &chinfo);
+               if (!rpdev) {
+                       dev_err(&ctrldev->dev, "failed to create %s channel\n", chinfo.name);
+                       ret = -ENXIO;
+               }
+               break;
+
+       case RPMSG_RELEASE_DEV_IOCTL:
+               ret = rpmsg_release_channel(ctrldev->rpdev, &chinfo);
+               if (ret)
+                       dev_err(&ctrldev->dev, "failed to release %s channel (%d)\n",
+                               chinfo.name, ret);
+               break;
+
+       default:
+               ret = -EINVAL;
+       }
+       mutex_unlock(&ctrldev->ctrl_lock);
+
+       return ret;
+};
+
+static const struct file_operations rpmsg_ctrldev_fops = {
+       .owner = THIS_MODULE,
+       .open = rpmsg_ctrldev_open,
+       .release = rpmsg_ctrldev_release,
+       .unlocked_ioctl = rpmsg_ctrldev_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
+};
+
+static void rpmsg_ctrldev_release_device(struct device *dev)
+{
+       struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev);
+
+       ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
+       ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+       kfree(ctrldev);
+}
+
+static int rpmsg_ctrldev_probe(struct rpmsg_device *rpdev)
+{
+       struct rpmsg_ctrldev *ctrldev;
+       struct device *dev;
+       int ret;
+
+       ctrldev = kzalloc(sizeof(*ctrldev), GFP_KERNEL);
+       if (!ctrldev)
+               return -ENOMEM;
+
+       ctrldev->rpdev = rpdev;
+
+       dev = &ctrldev->dev;
+       device_initialize(dev);
+       dev->parent = &rpdev->dev;
+       dev->class = rpmsg_class;
+
+       mutex_init(&ctrldev->ctrl_lock);
+       cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops);
+       ctrldev->cdev.owner = THIS_MODULE;
+
+       ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
+       if (ret < 0)
+               goto free_ctrldev;
+       dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
+
+       ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL);
+       if (ret < 0)
+               goto free_minor_ida;
+       dev->id = ret;
+       dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
+
+       ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
+       if (ret)
+               goto free_ctrl_ida;
+
+       /* We can now rely on the release function for cleanup */
+       dev->release = rpmsg_ctrldev_release_device;
+
+       dev_set_drvdata(&rpdev->dev, ctrldev);
+
+       return ret;
+
+free_ctrl_ida:
+       ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
+free_minor_ida:
+       ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+free_ctrldev:
+       put_device(dev);
+       kfree(ctrldev);
+
+       return ret;
+}
+
+static void rpmsg_ctrldev_remove(struct rpmsg_device *rpdev)
+{
+       struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev);
+       int ret;
+
+       /* Destroy all endpoints */
+       ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
+       if (ret)
+               dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
+
+       cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
+       put_device(&ctrldev->dev);
+}
+
+static struct rpmsg_driver rpmsg_ctrldev_driver = {
+       .probe = rpmsg_ctrldev_probe,
+       .remove = rpmsg_ctrldev_remove,
+       .drv = {
+               .name = "rpmsg_ctrl",
+       },
+};
+
+static int rpmsg_ctrldev_init(void)
+{
+       int ret;
+
+       ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_ctrl");
+       if (ret < 0) {
+               pr_err("failed to allocate char dev region\n");
+               return ret;
+       }
+
+       ret = register_rpmsg_driver(&rpmsg_ctrldev_driver);
+       if (ret < 0) {
+               pr_err("failed to register rpmsg driver\n");
+               unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
+       }
+
+       return ret;
+}
+postcore_initcall(rpmsg_ctrldev_init);
+
+static void rpmsg_ctrldev_exit(void)
+{
+       unregister_rpmsg_driver(&rpmsg_ctrldev_driver);
+       unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
+}
+module_exit(rpmsg_ctrldev_exit);
+
+MODULE_DESCRIPTION("rpmsg control interface");
+MODULE_ALIAS("rpmsg:" KBUILD_MODNAME);
+MODULE_LICENSE("GPL v2");
index b1245d3..d4b23fd 100644 (file)
@@ -18,6 +18,8 @@
 #define to_rpmsg_device(d) container_of(d, struct rpmsg_device, dev)
 #define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
 
+extern struct class *rpmsg_class;
+
 /**
  * struct rpmsg_device_ops - indirection table for the rpmsg_device operations
  * @create_channel:    create backend-specific channel, optional
@@ -84,16 +86,16 @@ struct rpmsg_device *rpmsg_create_channel(struct rpmsg_device *rpdev,
 int rpmsg_release_channel(struct rpmsg_device *rpdev,
                          struct rpmsg_channel_info *chinfo);
 /**
- * rpmsg_chrdev_register_device() - register chrdev device based on rpdev
+ * rpmsg_ctrldev_register_device() - register a char device for control based on rpdev
  * @rpdev:     prepared rpdev to be used for creating endpoints
  *
  * This function wraps rpmsg_register_device() preparing the rpdev for use as
  * basis for the rpmsg chrdev.
  */
-static inline int rpmsg_chrdev_register_device(struct rpmsg_device *rpdev)
+static inline int rpmsg_ctrldev_register_device(struct rpmsg_device *rpdev)
 {
-       strcpy(rpdev->id.name, "rpmsg_chrdev");
-       rpdev->driver_override = "rpmsg_chrdev";
+       strcpy(rpdev->id.name, "rpmsg_ctrl");
+       rpdev->driver_override = "rpmsg_ctrl";
 
        return rpmsg_register_device(rpdev);
 }
index ac764e0..3ede25b 100644 (file)
@@ -849,7 +849,7 @@ static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev
        rpdev_ctrl->dev.release = virtio_rpmsg_release_device;
        rpdev_ctrl->little_endian = virtio_is_little_endian(vrp->vdev);
 
-       err = rpmsg_chrdev_register_device(rpdev_ctrl);
+       err = rpmsg_ctrldev_register_device(rpdev_ctrl);
        if (err) {
                kfree(vch);
                return ERR_PTR(err);
index f6d6d4c..41c65b4 100644 (file)
@@ -1293,6 +1293,16 @@ config RTC_DRV_OPAL
          This driver can also be built as a module. If so, the module
          will be called rtc-opal.
 
+config RTC_DRV_OPTEE
+       tristate "OP-TEE based RTC driver"
+       depends on OPTEE
+       help
+         Select this to get support for OP-TEE based RTC control on SoCs where
+         RTC are not accessible to the normal world (Linux).
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-optee.
+
 config RTC_DRV_ZYNQMP
        tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
        depends on OF && HAS_IOMEM
index e92f3e9..2d827d8 100644 (file)
@@ -115,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_GAMECUBE)      += rtc-gamecube.o
 obj-$(CONFIG_RTC_DRV_NTXEC)    += rtc-ntxec.o
 obj-$(CONFIG_RTC_DRV_OMAP)     += rtc-omap.o
 obj-$(CONFIG_RTC_DRV_OPAL)     += rtc-opal.o
+obj-$(CONFIG_RTC_DRV_OPTEE)    += rtc-optee.o
 obj-$(CONFIG_RTC_DRV_PALMAS)   += rtc-palmas.o
 obj-$(CONFIG_RTC_DRV_PCAP)     += rtc-pcap.o
 obj-$(CONFIG_RTC_DRV_PCF2123)  += rtc-pcf2123.o
index 4b460c6..3c8eec2 100644 (file)
@@ -26,6 +26,15 @@ struct class *rtc_class;
 static void rtc_device_release(struct device *dev)
 {
        struct rtc_device *rtc = to_rtc_device(dev);
+       struct timerqueue_head *head = &rtc->timerqueue;
+       struct timerqueue_node *node;
+
+       mutex_lock(&rtc->ops_lock);
+       while ((node = timerqueue_getnext(head)))
+               timerqueue_del(head, node);
+       mutex_unlock(&rtc->ops_lock);
+
+       cancel_work_sync(&rtc->irqwork);
 
        ida_simple_remove(&rtc_ida, rtc->id);
        mutex_destroy(&rtc->ops_lock);
@@ -390,9 +399,6 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
        if (!rtc->ops->set_alarm)
                clear_bit(RTC_FEATURE_ALARM, rtc->features);
 
-       if (rtc->uie_unsupported)
-               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
-
        if (rtc->ops->set_offset)
                set_bit(RTC_FEATURE_CORRECTION, rtc->features);
 
index d8e8357..9edd662 100644 (file)
@@ -804,9 +804,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
        struct rtc_time tm;
        ktime_t now;
+       int err;
+
+       err = __rtc_read_time(rtc, &tm);
+       if (err)
+               return err;
 
        timer->enabled = 1;
-       __rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
 
        /* Skip over expired timers */
@@ -820,7 +824,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        trace_rtc_timer_enqueue(timer);
        if (!next || ktime_before(timer->node.expires, next->expires)) {
                struct rtc_wkalrm alarm;
-               int err;
 
                alarm.time = rtc_ktime_to_tm(timer->node.expires);
                alarm.enabled = 1;
index 336cb9a..d51565b 100644 (file)
@@ -1955,7 +1955,7 @@ static int ds1307_probe(struct i2c_client *client,
                dev_info(ds1307->dev,
                         "'wakeup-source' is set, request for an IRQ is disabled!\n");
                /* We cannot support UIE mode if we do not have an IRQ line */
-               ds1307->rtc->uie_unsupported = 1;
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, ds1307->rtc->features);
        }
 
        if (want_irq) {
index 75db7ab..a24331b 100644 (file)
@@ -1273,7 +1273,7 @@ ds1685_rtc_probe(struct platform_device *pdev)
 
        /* See if the platform doesn't support UIE. */
        if (pdata->uie_unsupported)
-               rtc_dev->uie_unsupported = 1;
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc_dev->features);
 
        rtc->dev = rtc_dev;
 
@@ -1285,13 +1285,10 @@ ds1685_rtc_probe(struct platform_device *pdev)
         * there won't be an automatic way of notifying the kernel about it,
         * unless ctrlc is explicitly polled.
         */
-       if (!pdata->no_irq) {
-               ret = platform_get_irq(pdev, 0);
-               if (ret <= 0)
-                       return ret;
-
-               rtc->irq_num = ret;
-
+       rtc->irq_num = platform_get_irq(pdev, 0);
+       if (rtc->irq_num <= 0) {
+               clear_bit(RTC_FEATURE_ALARM, rtc_dev->features);
+       } else {
                /* Request an IRQ. */
                ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_num,
                                       NULL, ds1685_rtc_irq_handler,
@@ -1305,7 +1302,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
                        rtc->irq_num = 0;
                }
        }
-       rtc->no_irq = pdata->no_irq;
 
        /* Setup complete. */
        ds1685_rtc_switch_to_bank0(rtc);
@@ -1394,7 +1390,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
                 * have been taken care of by the shutdown scripts and this
                 * is the final function call.
                 */
-               if (!rtc->no_irq)
+               if (rtc->irq_num)
                        disable_irq_nosync(rtc->irq_num);
 
                /* Oscillator must be on and the countdown chain enabled. */
index 138c5e0..11850c2 100644 (file)
@@ -261,15 +261,17 @@ static int __init efi_rtc_probe(struct platform_device *dev)
        if (efi.get_time(&eft, &cap) != EFI_SUCCESS)
                return -ENODEV;
 
-       rtc = devm_rtc_device_register(&dev->dev, "rtc-efi", &efi_rtc_ops,
-                                       THIS_MODULE);
+       rtc = devm_rtc_allocate_device(&dev->dev);
        if (IS_ERR(rtc))
                return PTR_ERR(rtc);
 
-       rtc->uie_unsupported = 1;
        platform_set_drvdata(dev, rtc);
 
-       return 0;
+       rtc->ops = &efi_rtc_ops;
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+       set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+
+       return devm_rtc_register_device(rtc);
 }
 
 static struct platform_driver efi_rtc_driver = {
index f717b36..18ca3b3 100644 (file)
@@ -235,6 +235,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
        }
 
        ret = of_address_to_resource(np, 0, &res);
+       of_node_put(np);
        if (ret) {
                pr_err("no io memory range found\n");
                return -1;
index 0751cae..90e602e 100644 (file)
@@ -220,24 +220,6 @@ static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        u8 buf[4];
        int ret;
 
-       /*
-        * The alarm has no seconds so deal with it
-        */
-       if (alm_tm->tm_sec) {
-               alm_tm->tm_sec = 0;
-               alm_tm->tm_min++;
-               if (alm_tm->tm_min >= 60) {
-                       alm_tm->tm_min = 0;
-                       alm_tm->tm_hour++;
-                       if (alm_tm->tm_hour >= 24) {
-                               alm_tm->tm_hour = 0;
-                               alm_tm->tm_mday++;
-                               if (alm_tm->tm_mday > 31)
-                                       alm_tm->tm_mday = 0;
-                       }
-               }
-       }
-
        ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
        if (ret < 0)
                return ret;
@@ -523,6 +505,10 @@ static int hym8563_probe(struct i2c_client *client,
        if (!hym8563)
                return -ENOMEM;
 
+       hym8563->rtc = devm_rtc_allocate_device(&client->dev);
+       if (IS_ERR(hym8563->rtc))
+               return PTR_ERR(hym8563->rtc);
+
        hym8563->client = client;
        i2c_set_clientdata(client, hym8563);
 
@@ -557,19 +543,15 @@ static int hym8563_probe(struct i2c_client *client,
        dev_dbg(&client->dev, "rtc information is %s\n",
                (ret & HYM8563_SEC_VL) ? "invalid" : "valid");
 
-       hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
-                                               &hym8563_rtc_ops, THIS_MODULE);
-       if (IS_ERR(hym8563->rtc))
-               return PTR_ERR(hym8563->rtc);
-
-       /* the hym8563 alarm only supports a minute accuracy */
-       hym8563->rtc->uie_unsupported = 1;
+       hym8563->rtc->ops = &hym8563_rtc_ops;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, hym8563->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, hym8563->rtc->features);
 
 #ifdef CONFIG_COMMON_CLK
        hym8563_clkout_register_clk(hym8563);
 #endif
 
-       return 0;
+       return devm_rtc_register_device(hym8563->rtc);
 }
 
 static const struct i2c_device_id hym8563_id[] = {
index 6d383b6..d868458 100644 (file)
@@ -932,10 +932,8 @@ static int m41t80_probe(struct i2c_client *client,
        m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
 
-       if (client->irq <= 0) {
-               /* We cannot support UIE mode if we do not have an IRQ line */
-               m41t80_data->rtc->uie_unsupported = 1;
-       }
+       if (client->irq <= 0)
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, m41t80_data->rtc->features);
 
        /* Make sure HT (Halt Update) bit is cleared */
        rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
index ae9f131..522449b 100644 (file)
@@ -176,6 +176,17 @@ int mc146818_get_time(struct rtc_time *time)
 }
 EXPORT_SYMBOL_GPL(mc146818_get_time);
 
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+               return true;
+#endif
+       return false;
+}
+
 /* Set the current date and time in the real time clock. */
 int mc146818_set_time(struct rtc_time *time)
 {
@@ -232,8 +243,10 @@ int mc146818_set_time(struct rtc_time *time)
        if (yrs >= 100)
                yrs -= 100;
 
-       if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
-           || RTC_ALWAYS_BCD) {
+       spin_lock_irqsave(&rtc_lock, flags);
+       save_control = CMOS_READ(RTC_CONTROL);
+       spin_unlock_irqrestore(&rtc_lock, flags);
+       if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                sec = bin2bcd(sec);
                min = bin2bcd(min);
                hrs = bin2bcd(hrs);
@@ -247,7 +260,10 @@ int mc146818_set_time(struct rtc_time *time)
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+       if (apply_amd_register_a_behavior())
+               CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+       else
+               CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
        CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
index bb2ea9b..6d7656a 100644 (file)
@@ -210,20 +210,6 @@ static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
        struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
        struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
 
-       /*
-        * the alarm has no seconds so deal with it
-        */
-       if (alarm->time.tm_sec) {
-               alarm->time.tm_sec = 0;
-               alarm->time.tm_min++;
-               if (alarm->time.tm_min >= 60) {
-                       alarm->time.tm_min = 0;
-                       alarm->time.tm_hour++;
-                       if (alarm->time.tm_hour >= 24)
-                               alarm->time.tm_hour = 0;
-               }
-       }
-
        alarm->time.tm_mday = -1;
        alarm->time.tm_mon = -1;
        alarm->time.tm_year = -1;
@@ -349,7 +335,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
        }
 
        rtc->rtc->ops = &mpc5200_rtc_ops;
-       rtc->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc->features);
        rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
        rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */
 
index f8f49a9..ad41aaf 100644 (file)
@@ -250,7 +250,7 @@ static int opal_rtc_probe(struct platform_device *pdev)
        rtc->ops = &opal_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
        rtc->range_max = RTC_TIMESTAMP_END_9999;
-       rtc->uie_unsupported = 1;
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
 
        return devm_rtc_register_device(rtc);
 }
diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c
new file mode 100644 (file)
index 0000000..9f8b5d4
--- /dev/null
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Microchip.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/tee_drv.h>
+
+#define RTC_INFO_VERSION       0x1
+
+#define TA_CMD_RTC_GET_INFO            0x0
+#define TA_CMD_RTC_GET_TIME            0x1
+#define TA_CMD_RTC_SET_TIME            0x2
+#define TA_CMD_RTC_GET_OFFSET          0x3
+#define TA_CMD_RTC_SET_OFFSET          0x4
+
+#define TA_RTC_FEATURE_CORRECTION      BIT(0)
+
+struct optee_rtc_time {
+       u32 tm_sec;
+       u32 tm_min;
+       u32 tm_hour;
+       u32 tm_mday;
+       u32 tm_mon;
+       u32 tm_year;
+       u32 tm_wday;
+};
+
+struct optee_rtc_info {
+       u64 version;
+       u64 features;
+       struct optee_rtc_time range_min;
+       struct optee_rtc_time range_max;
+};
+
+/**
+ * struct optee_rtc - OP-TEE RTC private data
+ * @dev:               OP-TEE based RTC device.
+ * @ctx:               OP-TEE context handler.
+ * @session_id:                RTC TA session identifier.
+ * @shm:               Memory pool shared with RTC device.
+ * @features:          Bitfield of RTC features
+ */
+struct optee_rtc {
+       struct device *dev;
+       struct tee_context *ctx;
+       u32 session_id;
+       struct tee_shm *shm;
+       u64 features;
+};
+
+static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct optee_rtc_time *optee_tm;
+       struct tee_param param[4] = {0};
+       int ret;
+
+       inv_arg.func = TA_CMD_RTC_GET_TIME;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       /* Fill invoke cmd params */
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       optee_tm = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(optee_tm))
+               return PTR_ERR(optee_tm);
+
+       if (param[0].u.memref.size != sizeof(*optee_tm))
+               return -EPROTO;
+
+       tm->tm_sec = optee_tm->tm_sec;
+       tm->tm_min = optee_tm->tm_min;
+       tm->tm_hour = optee_tm->tm_hour;
+       tm->tm_mday = optee_tm->tm_mday;
+       tm->tm_mon = optee_tm->tm_mon;
+       tm->tm_year = optee_tm->tm_year - 1900;
+       tm->tm_wday = optee_tm->tm_wday;
+       tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+       return 0;
+}
+
+static int optee_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       struct optee_rtc_time optee_tm;
+       void *rtc_data;
+       int ret;
+
+       optee_tm.tm_sec = tm->tm_sec;
+       optee_tm.tm_min = tm->tm_min;
+       optee_tm.tm_hour = tm->tm_hour;
+       optee_tm.tm_mday = tm->tm_mday;
+       optee_tm.tm_mon = tm->tm_mon;
+       optee_tm.tm_year = tm->tm_year + 1900;
+       optee_tm.tm_wday = tm->tm_wday;
+
+       inv_arg.func = TA_CMD_RTC_SET_TIME;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+       rtc_data = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(rtc_data))
+               return PTR_ERR(rtc_data);
+
+       memcpy(rtc_data, &optee_tm, sizeof(struct optee_rtc_time));
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       return 0;
+}
+
+static int optee_rtc_readoffset(struct device *dev, long *offset)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       int ret;
+
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               return -EOPNOTSUPP;
+
+       inv_arg.func = TA_CMD_RTC_GET_OFFSET;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       *offset = param[0].u.value.a;
+
+       return 0;
+}
+
+static int optee_rtc_setoffset(struct device *dev, long offset)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       int ret;
+
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               return -EOPNOTSUPP;
+
+       inv_arg.func = TA_CMD_RTC_SET_OFFSET;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+       param[0].u.value.a = offset;
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       return 0;
+}
+
+static const struct rtc_class_ops optee_rtc_ops = {
+       .read_time      = optee_rtc_readtime,
+       .set_time       = optee_rtc_settime,
+       .set_offset     = optee_rtc_setoffset,
+       .read_offset    = optee_rtc_readoffset,
+};
+
+static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc,
+                              u64 *features)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       struct optee_rtc_info *info;
+       struct optee_rtc_time *tm;
+       int ret;
+
+       inv_arg.func = TA_CMD_RTC_GET_INFO;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(*info);
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       info = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
+       if (param[0].u.memref.size != sizeof(*info))
+               return -EPROTO;
+
+       if (info->version != RTC_INFO_VERSION)
+               return -EPROTO;
+
+       *features = info->features;
+
+       tm = &info->range_min;
+       rtc->range_min = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+                                 tm->tm_sec);
+       tm = &info->range_max;
+       rtc->range_max = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+                                 tm->tm_sec);
+
+       return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+       if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+               return 1;
+       else
+               return 0;
+}
+
+static int optee_rtc_probe(struct device *dev)
+{
+       struct tee_client_device *rtc_device = to_tee_client_device(dev);
+       struct tee_ioctl_open_session_arg sess_arg;
+       struct optee_rtc *priv;
+       struct rtc_device *rtc;
+       struct tee_shm *shm;
+       int ret, err;
+
+       memset(&sess_arg, 0, sizeof(sess_arg));
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       rtc = devm_rtc_allocate_device(dev);
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       /* Open context with TEE driver */
+       priv->ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+       if (IS_ERR(priv->ctx))
+               return -ENODEV;
+
+       /* Open session with rtc Trusted App */
+       export_uuid(sess_arg.uuid, &rtc_device->id.uuid);
+       sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+       ret = tee_client_open_session(priv->ctx, &sess_arg, NULL);
+       if (ret < 0 || sess_arg.ret != 0) {
+               dev_err(dev, "tee_client_open_session failed, err: %x\n", sess_arg.ret);
+               err = -EINVAL;
+               goto out_ctx;
+       }
+       priv->session_id = sess_arg.session;
+
+       shm = tee_shm_alloc_kernel_buf(priv->ctx, sizeof(struct optee_rtc_info));
+       if (IS_ERR(shm)) {
+               dev_err(priv->dev, "tee_shm_alloc_kernel_buf failed\n");
+               err = PTR_ERR(shm);
+               goto out_sess;
+       }
+
+       priv->shm = shm;
+       priv->dev = dev;
+       dev_set_drvdata(dev, priv);
+
+       rtc->ops = &optee_rtc_ops;
+
+       err = optee_rtc_read_info(dev, rtc, &priv->features);
+       if (err) {
+               dev_err(dev, "Failed to get RTC features from OP-TEE\n");
+               goto out_shm;
+       }
+
+       err = devm_rtc_register_device(rtc);
+       if (err)
+               goto out_shm;
+
+       /*
+        * We must clear this bit after registering because rtc_register_device
+        * will set it if it sees that .set_offset is provided.
+        */
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               clear_bit(RTC_FEATURE_CORRECTION, rtc->features);
+
+       return 0;
+
+out_shm:
+       tee_shm_free(priv->shm);
+out_sess:
+       tee_client_close_session(priv->ctx, priv->session_id);
+out_ctx:
+       tee_client_close_context(priv->ctx);
+
+       return err;
+}
+
+static int optee_rtc_remove(struct device *dev)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+
+       tee_client_close_session(priv->ctx, priv->session_id);
+       tee_client_close_context(priv->ctx);
+
+       return 0;
+}
+
+static const struct tee_client_device_id optee_rtc_id_table[] = {
+       {UUID_INIT(0xf389f8c8, 0x845f, 0x496c,
+                  0x8b, 0xbe, 0xd6, 0x4b, 0xd2, 0x4c, 0x92, 0xfd)},
+       {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rtc_id_table);
+
+static struct tee_client_driver optee_rtc_driver = {
+       .id_table       = optee_rtc_id_table,
+       .driver         = {
+               .name           = "optee_rtc",
+               .bus            = &tee_bus_type,
+               .probe          = optee_rtc_probe,
+               .remove         = optee_rtc_remove,
+       },
+};
+
+static int __init optee_rtc_mod_init(void)
+{
+       return driver_register(&optee_rtc_driver.driver);
+}
+
+static void __exit optee_rtc_mod_exit(void)
+{
+       driver_unregister(&optee_rtc_driver.driver);
+}
+
+module_init(optee_rtc_mod_init);
+module_exit(optee_rtc_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
+MODULE_DESCRIPTION("OP-TEE based RTC driver");
index 7473e6c..e13b5e6 100644 (file)
@@ -427,7 +427,8 @@ static int pcf2123_probe(struct spi_device *spi)
         * support to this driver to generate interrupts more than once
         * per minute.
         */
-       rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
        rtc->ops = &pcf2123_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        rtc->range_max = RTC_TIMESTAMP_END_2099;
index 81a5b1f..63b275b 100644 (file)
@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
 static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
        struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
-       unsigned int buf[5], ctrl2;
+       u8 buf[5];
+       unsigned int ctrl2;
        int ret;
 
        ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
@@ -655,13 +656,25 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
        pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
        pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
-       pcf2127->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
        clear_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
 
        if (alarm_irq > 0) {
+               unsigned long flags;
+
+               /*
+                * If flags = 0, devm_request_threaded_irq() will use IRQ flags
+                * obtained from device tree.
+                */
+               if (dev_fwnode(dev))
+                       flags = 0;
+               else
+                       flags = IRQF_TRIGGER_LOW;
+
                ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
                                                pcf2127_rtc_irq,
-                                               IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                               flags | IRQF_ONESHOT,
                                                dev_name(dev), dev);
                if (ret) {
                        dev_err(dev, "failed to request alarm irq\n");
index df2b072..9760824 100644 (file)
@@ -616,7 +616,8 @@ static int pcf85063_probe(struct i2c_client *client)
        pcf85063->rtc->ops = &pcf85063_rtc_ops;
        pcf85063->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf85063->rtc->range_max = RTC_TIMESTAMP_END_2099;
-       pcf85063->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_2S, pcf85063->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf85063->rtc->features);
        clear_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
 
        if (config->has_alarms && client->irq > 0) {
index c93acad..b1b1943 100644 (file)
@@ -212,14 +212,6 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
        if (err < 0)
                return err;
 
-       /* The alarm has no seconds, round up to nearest minute */
-       if (tm->time.tm_sec) {
-               time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
-               alarm_time += 60 - tm->time.tm_sec;
-               rtc_time64_to_tm(alarm_time, &tm->time);
-       }
-
        regs[0] = bin2bcd(tm->time.tm_min);
        regs[1] = bin2bcd(tm->time.tm_hour);
        regs[2] = bin2bcd(tm->time.tm_mday);
@@ -240,9 +232,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
 {
        struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
        int ret;
+       u32 value;
 
        switch(param->param) {
-               u32 value;
 
        case RTC_PARAM_BACKUP_SWITCH_MODE:
                ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
@@ -279,9 +271,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
 static int pcf8523_param_set(struct device *dev, struct rtc_param *param)
 {
        struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+       u8 mode;
 
        switch(param->param) {
-               u8 mode;
        case RTC_PARAM_BACKUP_SWITCH_MODE:
                switch (param->uvalue) {
                case RTC_BSM_DISABLED:
@@ -450,7 +442,8 @@ static int pcf8523_probe(struct i2c_client *client,
        rtc->ops = &pcf8523_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        rtc->range_max = RTC_TIMESTAMP_END_2099;
-       rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
 
        if (client->irq > 0) {
                err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38);
index c8bddfb..9d06813 100644 (file)
@@ -330,19 +330,6 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
        unsigned char buf[4];
        int err;
 
-       /* The alarm has no seconds, round up to nearest minute */
-       if (tm->time.tm_sec) {
-               time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
-               alarm_time += 60 - tm->time.tm_sec;
-               rtc_time64_to_tm(alarm_time, &tm->time);
-       }
-
-       dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d "
-               "enabled=%d pending=%d\n", __func__,
-               tm->time.tm_min, tm->time.tm_hour, tm->time.tm_wday,
-               tm->time.tm_mday, tm->enabled, tm->pending);
-
        buf[0] = bin2bcd(tm->time.tm_min);
        buf[1] = bin2bcd(tm->time.tm_hour);
        buf[2] = bin2bcd(tm->time.tm_mday);
@@ -565,7 +552,8 @@ static int pcf8563_probe(struct i2c_client *client,
 
        pcf8563->rtc->ops = &pcf8563_rtc_ops;
        /* the pcf8563 alarm only supports a minute accuracy */
-       pcf8563->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, pcf8563->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf8563->rtc->features);
        pcf8563->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf8563->rtc->range_max = RTC_TIMESTAMP_END_2099;
        pcf8563->rtc->set_start_time = true;
index e38ee88..bad6a5d 100644 (file)
@@ -350,9 +350,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
                }
        }
 
-       if (!adev->irq[0])
-               clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
-
        device_init_wakeup(&adev->dev, true);
        ldata->rtc = devm_rtc_allocate_device(&adev->dev);
        if (IS_ERR(ldata->rtc)) {
@@ -360,6 +357,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
                goto out;
        }
 
+       if (!adev->irq[0])
+               clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
+
        ldata->rtc->ops = ops;
        ldata->rtc->range_min = vendor->range_min;
        ldata->rtc->range_max = vendor->range_max;
index 29a1c65..dc6d147 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/rtc.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -83,7 +84,7 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
        if (!rtc_dd->allow_set_time)
-               return -EACCES;
+               return -ENODEV;
 
        secs = rtc_tm_to_time64(tm);
 
@@ -527,40 +528,28 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
                return rc;
        }
 
-       return devm_rtc_register_device(rtc_dd->rtc);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int pm8xxx_rtc_resume(struct device *dev)
-{
-       struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+       rc = devm_rtc_register_device(rtc_dd->rtc);
+       if (rc)
+               return rc;
 
-       if (device_may_wakeup(dev))
-               disable_irq_wake(rtc_dd->rtc_alarm_irq);
+       rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->rtc_alarm_irq);
+       if (rc)
+               return rc;
 
        return 0;
 }
 
-static int pm8xxx_rtc_suspend(struct device *dev)
+static int pm8xxx_remove(struct platform_device *pdev)
 {
-       struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
-
-       if (device_may_wakeup(dev))
-               enable_irq_wake(rtc_dd->rtc_alarm_irq);
-
+       dev_pm_clear_wake_irq(&pdev->dev);
        return 0;
 }
-#endif
-
-static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops,
-                        pm8xxx_rtc_suspend,
-                        pm8xxx_rtc_resume);
 
 static struct platform_driver pm8xxx_rtc_driver = {
        .probe          = pm8xxx_rtc_probe,
+       .remove         = pm8xxx_remove,
        .driver = {
                .name           = "rtc-pm8xxx",
-               .pm             = &pm8xxx_rtc_pm_ops,
                .of_match_table = pm8xxx_id_table,
        },
 };
index b4a5200..d4777b0 100644 (file)
@@ -204,8 +204,10 @@ static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
        /* we don't report wday/yday/isdst ... */
        rtc_wait_not_busy(config);
 
-       time = readl(config->ioaddr + TIME_REG);
-       date = readl(config->ioaddr + DATE_REG);
+       do {
+               time = readl(config->ioaddr + TIME_REG);
+               date = readl(config->ioaddr + DATE_REG);
+       } while (time == readl(config->ioaddr + TIME_REG));
        tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
        tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
        tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
@@ -352,6 +354,10 @@ static int spear_rtc_probe(struct platform_device *pdev)
        if (!config)
                return -ENOMEM;
 
+       config->rtc = devm_rtc_allocate_device(&pdev->dev);
+       if (IS_ERR(config->rtc))
+               return PTR_ERR(config->rtc);
+
        /* alarm irqs */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -380,16 +386,13 @@ static int spear_rtc_probe(struct platform_device *pdev)
        spin_lock_init(&config->lock);
        platform_set_drvdata(pdev, config);
 
-       config->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-                                       &spear_rtc_ops, THIS_MODULE);
-       if (IS_ERR(config->rtc)) {
-               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
-                               PTR_ERR(config->rtc));
-               status = PTR_ERR(config->rtc);
-               goto err_disable_clock;
-       }
+       config->rtc->ops = &spear_rtc_ops;
+       config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+       config->rtc->range_max = RTC_TIMESTAMP_END_9999;
 
-       config->rtc->uie_unsupported = 1;
+       status = devm_rtc_register_device(config->rtc);
+       if (status)
+               goto err_disable_clock;
 
        if (!device_can_wakeup(&pdev->dev))
                device_init_wakeup(&pdev->dev, 1);
index 711832c..5b3e4da 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/clk/sunxi-ng.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -48,7 +49,8 @@
 
 /* Alarm 0 (counter) */
 #define SUN6I_ALRM_COUNTER                     0x0020
-#define SUN6I_ALRM_CUR_VAL                     0x0024
+/* This holds the remaining alarm seconds on older SoCs (current value) */
+#define SUN6I_ALRM_COUNTER_HMS                 0x0024
 #define SUN6I_ALRM_EN                          0x0028
 #define SUN6I_ALRM_EN_CNT_EN                   BIT(0)
 #define SUN6I_ALRM_IRQ_EN                      0x002c
 #define SUN6I_YEAR_MIN                         1970
 #define SUN6I_YEAR_OFF                         (SUN6I_YEAR_MIN - 1900)
 
+#define SECS_PER_DAY                           (24 * 3600ULL)
+
 /*
  * There are other differences between models, including:
  *
@@ -133,12 +137,15 @@ struct sun6i_rtc_clk_data {
        unsigned int has_auto_swt : 1;
 };
 
+#define RTC_LINEAR_DAY BIT(0)
+
 struct sun6i_rtc_dev {
        struct rtc_device *rtc;
        const struct sun6i_rtc_clk_data *data;
        void __iomem *base;
        int irq;
-       unsigned long alarm;
+       time64_t alarm;
+       unsigned long flags;
 
        struct clk_hw hw;
        struct clk_hw *int_osc;
@@ -363,23 +370,6 @@ CLK_OF_DECLARE_DRIVER(sun8i_h3_rtc_clk, "allwinner,sun8i-h3-rtc",
 CLK_OF_DECLARE_DRIVER(sun50i_h5_rtc_clk, "allwinner,sun50i-h5-rtc",
                      sun8i_h3_rtc_clk_init);
 
-static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
-       .rc_osc_rate = 16000000,
-       .fixed_prescaler = 32,
-       .has_prescaler = 1,
-       .has_out_clk = 1,
-       .export_iosc = 1,
-       .has_losc_en = 1,
-       .has_auto_swt = 1,
-};
-
-static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
-{
-       sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
-}
-CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
-                     sun50i_h6_rtc_clk_init);
-
 /*
  * The R40 user manual is self-conflicting on whether the prescaler is
  * fixed or configurable. The clock diagram shows it as fixed, but there
@@ -467,22 +457,30 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
        } while ((date != readl(chip->base + SUN6I_RTC_YMD)) ||
                 (time != readl(chip->base + SUN6I_RTC_HMS)));
 
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /*
+                * Newer chips store a linear day number, the manual
+                * does not mandate any epoch base. The BSP driver uses
+                * the UNIX epoch, let's just copy that, as it's the
+                * easiest anyway.
+                */
+               rtc_time64_to_tm((date & 0xffff) * SECS_PER_DAY, rtc_tm);
+       } else {
+               rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
+               rtc_tm->tm_mon  = SUN6I_DATE_GET_MON_VALUE(date) - 1;
+               rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
+
+               /*
+                * switch from (data_year->min)-relative offset to
+                * a (1900)-relative one
+                */
+               rtc_tm->tm_year += SUN6I_YEAR_OFF;
+       }
+
        rtc_tm->tm_sec  = SUN6I_TIME_GET_SEC_VALUE(time);
        rtc_tm->tm_min  = SUN6I_TIME_GET_MIN_VALUE(time);
        rtc_tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(time);
 
-       rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
-       rtc_tm->tm_mon  = SUN6I_DATE_GET_MON_VALUE(date);
-       rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
-
-       rtc_tm->tm_mon  -= 1;
-
-       /*
-        * switch from (data_year->min)-relative offset to
-        * a (1900)-relative one
-        */
-       rtc_tm->tm_year += SUN6I_YEAR_OFF;
-
        return 0;
 }
 
@@ -510,36 +508,54 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
        struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
        struct rtc_time *alrm_tm = &wkalrm->time;
        struct rtc_time tm_now;
-       unsigned long time_now = 0;
-       unsigned long time_set = 0;
-       unsigned long time_gap = 0;
-       int ret = 0;
-
-       ret = sun6i_rtc_gettime(dev, &tm_now);
-       if (ret < 0) {
-               dev_err(dev, "Error in getting time\n");
-               return -EINVAL;
-       }
+       time64_t time_set;
+       u32 counter_val, counter_val_hms;
+       int ret;
 
        time_set = rtc_tm_to_time64(alrm_tm);
-       time_now = rtc_tm_to_time64(&tm_now);
-       if (time_set <= time_now) {
-               dev_err(dev, "Date to set in the past\n");
-               return -EINVAL;
-       }
-
-       time_gap = time_set - time_now;
 
-       if (time_gap > U32_MAX) {
-               dev_err(dev, "Date too far in the future\n");
-               return -EINVAL;
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /*
+                * The alarm registers hold the actual alarm time, encoded
+                * in the same way (linear day + HMS) as the current time.
+                */
+               counter_val_hms = SUN6I_TIME_SET_SEC_VALUE(alrm_tm->tm_sec)  |
+                                 SUN6I_TIME_SET_MIN_VALUE(alrm_tm->tm_min)  |
+                                 SUN6I_TIME_SET_HOUR_VALUE(alrm_tm->tm_hour);
+               /* The division will cut off the H:M:S part of alrm_tm. */
+               counter_val = div_u64(rtc_tm_to_time64(alrm_tm), SECS_PER_DAY);
+       } else {
+               /* The alarm register holds the number of seconds left. */
+               time64_t time_now;
+
+               ret = sun6i_rtc_gettime(dev, &tm_now);
+               if (ret < 0) {
+                       dev_err(dev, "Error in getting time\n");
+                       return -EINVAL;
+               }
+
+               time_now = rtc_tm_to_time64(&tm_now);
+               if (time_set <= time_now) {
+                       dev_err(dev, "Date to set in the past\n");
+                       return -EINVAL;
+               }
+               if ((time_set - time_now) > U32_MAX) {
+                       dev_err(dev, "Date too far in the future\n");
+                       return -EINVAL;
+               }
+
+               counter_val = time_set - time_now;
        }
 
        sun6i_rtc_setaie(0, chip);
        writel(0, chip->base + SUN6I_ALRM_COUNTER);
+       if (chip->flags & RTC_LINEAR_DAY)
+               writel(0, chip->base + SUN6I_ALRM_COUNTER_HMS);
        usleep_range(100, 300);
 
-       writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+       writel(counter_val, chip->base + SUN6I_ALRM_COUNTER);
+       if (chip->flags & RTC_LINEAR_DAY)
+               writel(counter_val_hms, chip->base + SUN6I_ALRM_COUNTER_HMS);
        chip->alarm = time_set;
 
        sun6i_rtc_setaie(wkalrm->enabled, chip);
@@ -571,20 +587,25 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
        u32 date = 0;
        u32 time = 0;
 
-       rtc_tm->tm_year -= SUN6I_YEAR_OFF;
-       rtc_tm->tm_mon += 1;
-
-       date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
-               SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon)  |
-               SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
-
-       if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
-               date |= SUN6I_LEAP_SET_VALUE(1);
-
        time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec)  |
                SUN6I_TIME_SET_MIN_VALUE(rtc_tm->tm_min)  |
                SUN6I_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
 
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /* The division will cut off the H:M:S part of rtc_tm. */
+               date = div_u64(rtc_tm_to_time64(rtc_tm), SECS_PER_DAY);
+       } else {
+               rtc_tm->tm_year -= SUN6I_YEAR_OFF;
+               rtc_tm->tm_mon += 1;
+
+               date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
+                       SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon)  |
+                       SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
+
+               if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
+                       date |= SUN6I_LEAP_SET_VALUE(1);
+       }
+
        /* Check whether registers are writable */
        if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
                           SUN6I_LOSC_CTRL_ACC_MASK, 50)) {
@@ -668,11 +689,35 @@ static int sun6i_rtc_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(sun6i_rtc_pm_ops,
        sun6i_rtc_suspend, sun6i_rtc_resume);
 
+static void sun6i_rtc_bus_clk_cleanup(void *data)
+{
+       struct clk *bus_clk = data;
+
+       clk_disable_unprepare(bus_clk);
+}
+
 static int sun6i_rtc_probe(struct platform_device *pdev)
 {
        struct sun6i_rtc_dev *chip = sun6i_rtc;
+       struct device *dev = &pdev->dev;
+       struct clk *bus_clk;
        int ret;
 
+       bus_clk = devm_clk_get_optional(dev, "bus");
+       if (IS_ERR(bus_clk))
+               return PTR_ERR(bus_clk);
+
+       if (bus_clk) {
+               ret = clk_prepare_enable(bus_clk);
+               if (ret)
+                       return ret;
+
+               ret = devm_add_action_or_reset(dev, sun6i_rtc_bus_clk_cleanup,
+                                              bus_clk);
+               if (ret)
+                       return ret;
+       }
+
        if (!chip) {
                chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
                if (!chip)
@@ -683,10 +728,18 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
                chip->base = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(chip->base))
                        return PTR_ERR(chip->base);
+
+               if (IS_REACHABLE(CONFIG_SUN6I_RTC_CCU)) {
+                       ret = sun6i_rtc_ccu_probe(dev, chip->base);
+                       if (ret)
+                               return ret;
+               }
        }
 
        platform_set_drvdata(pdev, chip);
 
+       chip->flags = (unsigned long)of_device_get_match_data(&pdev->dev);
+
        chip->irq = platform_get_irq(pdev, 0);
        if (chip->irq < 0)
                return chip->irq;
@@ -733,7 +786,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
                return PTR_ERR(chip->rtc);
 
        chip->rtc->ops = &sun6i_rtc_ops;
-       chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
+       if (chip->flags & RTC_LINEAR_DAY)
+               chip->rtc->range_max = (65536 * SECS_PER_DAY) - 1;
+       else
+               chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
 
        ret = devm_rtc_register_device(chip->rtc);
        if (ret)
@@ -758,6 +814,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
        { .compatible = "allwinner,sun8i-v3-rtc" },
        { .compatible = "allwinner,sun50i-h5-rtc" },
        { .compatible = "allwinner,sun50i-h6-rtc" },
+       { .compatible = "allwinner,sun50i-h616-rtc",
+               .data = (void *)RTC_LINEAR_DAY },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
index 2018614..6eaa932 100644 (file)
@@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
+       ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
                            wm8350_rtc_update_handler, 0,
                            "RTC Seconds", wm8350);
+       if (ret)
+               return ret;
+
        wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
 
-       wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
+       ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
                            wm8350_rtc_alarm_handler, 0,
                            "RTC Alarm", wm8350);
+       if (ret) {
+               wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
+               return ret;
+       }
 
        return 0;
 }
index cf68a9b..d3d0054 100644 (file)
@@ -180,8 +180,6 @@ static int xgene_rtc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       /* HW does not support update faster than 1 seconds */
-       pdata->rtc->uie_unsupported = 1;
        pdata->rtc->ops = &xgene_rtc_ops;
        pdata->rtc->range_max = U32_MAX;
 
index f0763e3..cb24917 100644 (file)
@@ -745,9 +745,7 @@ sclp_sync_wait(void)
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
-               if (timer_pending(&sclp_request_timer) &&
-                   get_tod_clock_fast() > timeout &&
-                   del_timer(&sclp_request_timer))
+               if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
                        sclp_request_timer.function(&sclp_request_timer);
                cpu_relax();
        }
index de02886..fe5ee26 100644 (file)
@@ -109,8 +109,7 @@ static void sclp_console_sync_queue(void)
        unsigned long flags;
 
        spin_lock_irqsave(&sclp_con_lock, flags);
-       if (timer_pending(&sclp_con_timer))
-               del_timer(&sclp_con_timer);
+       del_timer(&sclp_con_timer);
        while (sclp_con_queue_running) {
                spin_unlock_irqrestore(&sclp_con_lock, flags);
                sclp_sync_wait();
index 7bc4e4a..3b4e7e5 100644 (file)
@@ -231,8 +231,7 @@ sclp_vt220_emit_current(void)
                        list_add_tail(&sclp_vt220_current_request->list,
                                      &sclp_vt220_outqueue);
                        sclp_vt220_current_request = NULL;
-                       if (timer_pending(&sclp_vt220_timer))
-                               del_timer(&sclp_vt220_timer);
+                       del_timer(&sclp_vt220_timer);
                }
                sclp_vt220_flush_later = 0;
        }
@@ -776,8 +775,7 @@ static void __sclp_vt220_flush_buffer(void)
 
        sclp_vt220_emit_current();
        spin_lock_irqsave(&sclp_vt220_lock, flags);
-       if (timer_pending(&sclp_vt220_timer))
-               del_timer(&sclp_vt220_timer);
+       del_timer(&sclp_vt220_timer);
        while (sclp_vt220_queue_running) {
                spin_unlock_irqrestore(&sclp_vt220_lock, flags);
                sclp_sync_wait();
index 7ada994..38cc156 100644 (file)
@@ -354,10 +354,10 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
        if ((
                sense[0] == SENSE_DATA_CHECK      ||
                sense[0] == SENSE_EQUIPMENT_CHECK ||
-               sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+               sense[0] == (SENSE_EQUIPMENT_CHECK | SENSE_DEFERRED_UNIT_CHECK)
        ) && (
                sense[1] == SENSE_DRIVE_ONLINE ||
-               sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+               sense[1] == (SENSE_BEGINNING_OF_TAPE | SENSE_WRITE_MODE)
        )) {
                switch (request->op) {
                /*
index 05e136c..6d63b96 100644 (file)
@@ -113,16 +113,10 @@ ccw_device_timeout(struct timer_list *t)
 void
 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
 {
-       if (expires == 0) {
+       if (expires == 0)
                del_timer(&cdev->private->timer);
-               return;
-       }
-       if (timer_pending(&cdev->private->timer)) {
-               if (mod_timer(&cdev->private->timer, jiffies + expires))
-                       return;
-       }
-       cdev->private->timer.expires = jiffies + expires;
-       add_timer(&cdev->private->timer);
+       else
+               mod_timer(&cdev->private->timer, jiffies + expires);
 }
 
 int
index 8b46368..ab6a749 100644 (file)
@@ -112,16 +112,10 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
 {
        struct eadm_private *private = get_eadm_private(sch);
 
-       if (expires == 0) {
+       if (expires == 0)
                del_timer(&private->timer);
-               return;
-       }
-       if (timer_pending(&private->timer)) {
-               if (mod_timer(&private->timer, jiffies + expires))
-                       return;
-       }
-       private->timer.expires = jiffies + expires;
-       add_timer(&private->timer);
+       else
+               mod_timer(&private->timer, jiffies + expires);
 }
 
 static void eadm_subchannel_irq(struct subchannel *sch)
index 8fd5a17..6a65885 100644 (file)
@@ -315,6 +315,7 @@ struct ap_perms {
        unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
        unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
        unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+       unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)];
 };
 extern struct ap_perms ap_perms;
 extern struct mutex ap_perms_mutex;
index cf23ce1..7f69ca6 100644 (file)
@@ -155,7 +155,7 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
        /*
         * The cca_xxx2protkey call may fail when a card has been
         * addressed where the master key was changed after last fetch
-        * of the mkvp into the cache. Try 3 times: First witout verify
+        * of the mkvp into the cache. Try 3 times: First without verify
         * then with verify and last round with verify and old master
         * key verification pattern match not ignored.
         */
index 7dc2636..6e08d04 100644 (file)
@@ -1189,13 +1189,6 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
  * @matrix_mdev: a mediated matrix device
  * @kvm: reference to KVM instance
  *
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is set to avoid a potential lockdep splat.
- * The kvm->lock is taken to set the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
- *
  * Return: 0 if no other mediated matrix device has a reference to @kvm;
  * otherwise, returns an -EPERM.
  */
@@ -1269,18 +1262,11 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
  * by @matrix_mdev.
  *
  * @matrix_mdev: a matrix mediated device
- * @kvm: the pointer to the kvm structure being unset.
- *
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is cleared to avoid a potential lockdep splat.
- * The kvm->lock is taken to clear the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
  */
-static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
-                                  struct kvm *kvm)
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
 {
+       struct kvm *kvm = matrix_mdev->kvm;
+
        if (kvm && kvm->arch.crypto.crycbd) {
                down_write(&kvm->arch.crypto.pqap_hook_rwsem);
                kvm->arch.crypto.pqap_hook = NULL;
@@ -1311,7 +1297,7 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
        matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
 
        if (!data)
-               vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+               vfio_ap_mdev_unset_kvm(matrix_mdev);
        else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
                notify_rc = NOTIFY_DONE;
 
@@ -1448,7 +1434,7 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
                                 &matrix_mdev->iommu_notifier);
        vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                 &matrix_mdev->group_notifier);
-       vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+       vfio_ap_mdev_unset_kvm(matrix_mdev);
 }
 
 static int vfio_ap_mdev_get_device_info(unsigned long arg)
index 80e2a30..aa6dc3c 100644 (file)
@@ -285,10 +285,53 @@ static ssize_t aqmask_store(struct device *dev,
 
 static DEVICE_ATTR_RW(aqmask);
 
+static ssize_t admask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       int i, rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       buf[0] = '0';
+       buf[1] = 'x';
+       for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
+               snprintf(buf + 2 + 2 * i * sizeof(long),
+                        PAGE_SIZE - 2 - 2 * i * sizeof(long),
+                        "%016lx", zcdndev->perms.adm[i]);
+       buf[2 + 2 * i * sizeof(long)] = '\n';
+       buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+       rc = 2 + 2 * i * sizeof(long) + 1;
+
+       mutex_unlock(&ap_perms_mutex);
+
+       return rc;
+}
+
+static ssize_t admask_store(struct device *dev,
+                           struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       int rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
+                              AP_DOMAINS, &ap_perms_mutex);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(admask);
+
 static struct attribute *zcdn_dev_attrs[] = {
        &dev_attr_ioctlmask.attr,
        &dev_attr_apmask.attr,
        &dev_attr_aqmask.attr,
+       &dev_attr_admask.attr,
        NULL
 };
 
@@ -880,11 +923,22 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
        if (rc)
                goto out;
 
+       tdom = *domain;
+       if (perms != &ap_perms && tdom < AP_DOMAINS) {
+               if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+                       if (!test_bit_inv(tdom, perms->adm)) {
+                               rc = -ENODEV;
+                               goto out;
+                       }
+               } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+                       rc = -EOPNOTSUPP;
+                       goto out;
+               }
+       }
        /*
         * If a valid target domain is set and this domain is NOT a usage
         * domain but a control only domain, autoselect target domain.
         */
-       tdom = *domain;
        if (tdom < AP_DOMAINS &&
            !ap_test_config_usage_domain(tdom) &&
            ap_test_config_ctrl_domain(tdom))
@@ -1062,6 +1116,18 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
        if (rc)
                goto out_free;
 
+       if (perms != &ap_perms && domain < AUTOSEL_DOM) {
+               if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+                       if (!test_bit_inv(domain, perms->adm)) {
+                               rc = -ENODEV;
+                               goto out_free;
+                       }
+               } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+                       rc = -EOPNOTSUPP;
+                       goto out_free;
+               }
+       }
+
        pref_zc = NULL;
        pref_zq = NULL;
        spin_lock(&zcrypt_list_lock);
index 3e259be..fcbd537 100644 (file)
@@ -90,7 +90,7 @@ static ssize_t online_store(struct device *dev,
        list_for_each_entry(zq, &zc->zqueues, list)
                maxzqs++;
        if (maxzqs > 0)
-               zq_uelist = kcalloc(maxzqs + 1, sizeof(zq), GFP_ATOMIC);
+               zq_uelist = kcalloc(maxzqs + 1, sizeof(*zq_uelist), GFP_ATOMIC);
        list_for_each_entry(zq, &zc->zqueues, list)
                if (zcrypt_queue_force_online(zq, online))
                        if (zq_uelist) {
index 9ce5a71..98d33f9 100644 (file)
@@ -1109,7 +1109,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
        if (kb->head.type == TOKTYPE_NON_CCA &&
            kb->head.version == TOKVER_EP11_AES) {
                has_header = true;
-               keysize = kb->head.len < keysize ? kb->head.len : keysize;
+               keysize = min_t(size_t, kb->head.len, keysize);
        }
 
        /* request cprb and payload */
index 7d41dfe..48c4dad 100644 (file)
 
 #include "ifcvf_base.h"
 
-static inline u8 ifc_ioread8(u8 __iomem *addr)
-{
-       return ioread8(addr);
-}
-static inline u16 ifc_ioread16 (__le16 __iomem *addr)
+struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
 {
-       return ioread16(addr);
+       return container_of(hw, struct ifcvf_adapter, vf);
 }
 
-static inline u32 ifc_ioread32(__le32 __iomem *addr)
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
 {
-       return ioread32(addr);
-}
+       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
-{
-       iowrite8(value, addr);
-}
+       vp_iowrite16(qid, &cfg->queue_select);
+       vp_iowrite16(vector, &cfg->queue_msix_vector);
 
-static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
-{
-       iowrite16(value, addr);
+       return vp_ioread16(&cfg->queue_msix_vector);
 }
 
-static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
 {
-       iowrite32(value, addr);
-}
+       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-static void ifc_iowrite64_twopart(u64 val,
-                                 __le32 __iomem *lo, __le32 __iomem *hi)
-{
-       ifc_iowrite32((u32)val, lo);
-       ifc_iowrite32(val >> 32, hi);
-}
+       cfg = hw->common_cfg;
+       vp_iowrite16(vector,  &cfg->msix_config);
 
-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
-{
-       return container_of(hw, struct ifcvf_adapter, vf);
+       return vp_ioread16(&cfg->msix_config);
 }
 
 static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
@@ -158,15 +142,16 @@ next:
                return -EIO;
        }
 
-       hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+       hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
 
        for (i = 0; i < hw->nr_vring; i++) {
-               ifc_iowrite16(i, &hw->common_cfg->queue_select);
-               notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
+               vp_iowrite16(i, &hw->common_cfg->queue_select);
+               notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
                hw->vring[i].notify_addr = hw->notify_base +
                        notify_off * hw->notify_off_multiplier;
                hw->vring[i].notify_pa = hw->notify_base_pa +
                        notify_off * hw->notify_off_multiplier;
+               hw->vring[i].irq = -EINVAL;
        }
 
        hw->lm_cfg = hw->base[IFCVF_LM_BAR];
@@ -176,17 +161,20 @@ next:
                  hw->common_cfg, hw->notify_base, hw->isr,
                  hw->dev_cfg, hw->notify_off_multiplier);
 
+       hw->vqs_reused_irq = -EINVAL;
+       hw->config_irq = -EINVAL;
+
        return 0;
 }
 
 u8 ifcvf_get_status(struct ifcvf_hw *hw)
 {
-       return ifc_ioread8(&hw->common_cfg->device_status);
+       return vp_ioread8(&hw->common_cfg->device_status);
 }
 
 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 {
-       ifc_iowrite8(status, &hw->common_cfg->device_status);
+       vp_iowrite8(status, &hw->common_cfg->device_status);
 }
 
 void ifcvf_reset(struct ifcvf_hw *hw)
@@ -214,11 +202,11 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
        u32 features_lo, features_hi;
        u64 features;
 
-       ifc_iowrite32(0, &cfg->device_feature_select);
-       features_lo = ifc_ioread32(&cfg->device_feature);
+       vp_iowrite32(0, &cfg->device_feature_select);
+       features_lo = vp_ioread32(&cfg->device_feature);
 
-       ifc_iowrite32(1, &cfg->device_feature_select);
-       features_hi = ifc_ioread32(&cfg->device_feature);
+       vp_iowrite32(1, &cfg->device_feature_select);
+       features_hi = vp_ioread32(&cfg->device_feature);
 
        features = ((u64)features_hi << 32) | features_lo;
 
@@ -271,12 +259,12 @@ void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
 
        WARN_ON(offset + length > hw->config_size);
        do {
-               old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+               old_gen = vp_ioread8(&hw->common_cfg->config_generation);
                p = dst;
                for (i = 0; i < length; i++)
-                       *p++ = ifc_ioread8(hw->dev_cfg + offset + i);
+                       *p++ = vp_ioread8(hw->dev_cfg + offset + i);
 
-               new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+               new_gen = vp_ioread8(&hw->common_cfg->config_generation);
        } while (old_gen != new_gen);
 }
 
@@ -289,18 +277,18 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
        p = src;
        WARN_ON(offset + length > hw->config_size);
        for (i = 0; i < length; i++)
-               ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
+               vp_iowrite8(*p++, hw->dev_cfg + offset + i);
 }
 
 static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
 {
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-       ifc_iowrite32(0, &cfg->guest_feature_select);
-       ifc_iowrite32((u32)features, &cfg->guest_feature);
+       vp_iowrite32(0, &cfg->guest_feature_select);
+       vp_iowrite32((u32)features, &cfg->guest_feature);
 
-       ifc_iowrite32(1, &cfg->guest_feature_select);
-       ifc_iowrite32(features >> 32, &cfg->guest_feature);
+       vp_iowrite32(1, &cfg->guest_feature_select);
+       vp_iowrite32(features >> 32, &cfg->guest_feature);
 }
 
 static int ifcvf_config_features(struct ifcvf_hw *hw)
@@ -329,7 +317,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
        ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
        q_pair_id = qid / hw->nr_vring;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
-       last_avail_idx = ifc_ioread16(avail_idx_addr);
+       last_avail_idx = vp_ioread16(avail_idx_addr);
 
        return last_avail_idx;
 }
@@ -344,7 +332,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
        q_pair_id = qid / hw->nr_vring;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
        hw->vring[qid].last_avail_idx = num;
-       ifc_iowrite16(num, avail_idx_addr);
+       vp_iowrite16(num, avail_idx_addr);
 
        return 0;
 }
@@ -352,41 +340,23 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
        struct virtio_pci_common_cfg __iomem *cfg;
-       struct ifcvf_adapter *ifcvf;
        u32 i;
 
-       ifcvf = vf_to_adapter(hw);
        cfg = hw->common_cfg;
-       ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
-
-       if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
-               IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
-               return -EINVAL;
-       }
-
        for (i = 0; i < hw->nr_vring; i++) {
                if (!hw->vring[i].ready)
                        break;
 
-               ifc_iowrite16(i, &cfg->queue_select);
-               ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+               vp_iowrite16(i, &cfg->queue_select);
+               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
                                     &cfg->queue_desc_hi);
-               ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
                                      &cfg->queue_avail_hi);
-               ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
                                     &cfg->queue_used_hi);
-               ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
-               ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);
-
-               if (ifc_ioread16(&cfg->queue_msix_vector) ==
-                   VIRTIO_MSI_NO_VECTOR) {
-                       IFCVF_ERR(ifcvf->pdev,
-                                 "No msix vector for queue %u\n", i);
-                       return -EINVAL;
-               }
-
+               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
                ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
-               ifc_iowrite16(1, &cfg->queue_enable);
+               vp_iowrite16(1, &cfg->queue_enable);
        }
 
        return 0;
@@ -394,18 +364,12 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 
 static void ifcvf_hw_disable(struct ifcvf_hw *hw)
 {
-       struct virtio_pci_common_cfg __iomem *cfg;
        u32 i;
 
-       cfg = hw->common_cfg;
-       ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);
-
+       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
        for (i = 0; i < hw->nr_vring; i++) {
-               ifc_iowrite16(i, &cfg->queue_select);
-               ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
        }
-
-       ifc_ioread16(&cfg->queue_msix_vector);
 }
 
 int ifcvf_start_hw(struct ifcvf_hw *hw)
@@ -433,5 +397,5 @@ void ifcvf_stop_hw(struct ifcvf_hw *hw)
 
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
 {
-       ifc_iowrite16(qid, hw->vring[qid].notify_addr);
+       vp_iowrite16(qid, hw->vring[qid].notify_addr);
 }
index c486873..115b61f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
 #include <linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
 #include <uapi/linux/virtio_net.h>
 #include <uapi/linux/virtio_blk.h>
 #include <uapi/linux/virtio_config.h>
@@ -27,8 +28,6 @@
 
 #define IFCVF_QUEUE_ALIGNMENT  PAGE_SIZE
 #define IFCVF_QUEUE_MAX                32768
-#define IFCVF_MSI_CONFIG_OFF   0
-#define IFCVF_MSI_QUEUE_OFF    1
 #define IFCVF_PCI_MAX_RESOURCE 6
 
 #define IFCVF_LM_CFG_SIZE              0x40
 #define ifcvf_private_to_vf(adapter) \
        (&((struct ifcvf_adapter *)adapter)->vf)
 
+/* every vq and the config interrupt has its own vector */
+#define MSIX_VECTOR_PER_VQ_AND_CONFIG          1
+/* all vqs share a vector, and config interrupt has a separate vector */
+#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG       2
+/* all vqs and config interrupt share a vector */
+#define MSIX_VECTOR_DEV_SHARED                 3
+
 struct vring_info {
        u64 desc;
        u64 avail;
@@ -60,25 +66,27 @@ struct ifcvf_hw {
        u8 __iomem *isr;
        /* Live migration */
        u8 __iomem *lm_cfg;
-       u16 nr_vring;
        /* Notification bar number */
        u8 notify_bar;
+       u8 msix_vector_status;
+       /* virtio-net or virtio-blk device config size */
+       u32 config_size;
        /* Notificaiton bar address */
        void __iomem *notify_base;
        phys_addr_t notify_base_pa;
        u32 notify_off_multiplier;
+       u32 dev_type;
        u64 req_features;
        u64 hw_features;
-       u32 dev_type;
        struct virtio_pci_common_cfg __iomem *common_cfg;
        void __iomem *dev_cfg;
        struct vring_info vring[IFCVF_MAX_QUEUES];
        void __iomem * const *base;
        char config_msix_name[256];
        struct vdpa_callback config_cb;
-       unsigned int config_irq;
-       /* virtio-net or virtio-blk device config size */
-       u32 config_size;
+       int config_irq;
+       int vqs_reused_irq;
+       u16 nr_vring;
 };
 
 struct ifcvf_adapter {
@@ -123,4 +131,6 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
 u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
 #endif /* _IFCVF_H_ */
index d1a6b5a..4366320 100644 (file)
@@ -27,7 +27,7 @@ static irqreturn_t ifcvf_config_changed(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
+static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
 {
        struct vring_info *vring = arg;
 
@@ -37,76 +37,324 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
+{
+       struct ifcvf_hw *vf = arg;
+       struct vring_info *vring;
+       int i;
+
+       for (i = 0; i < vf->nr_vring; i++) {
+               vring = &vf->vring[i];
+               if (vring->cb.callback)
+                       vring->cb.callback(vring->cb.private);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
+{
+       struct ifcvf_hw *vf = arg;
+       u8 isr;
+
+       isr = vp_ioread8(vf->isr);
+       if (isr & VIRTIO_PCI_ISR_CONFIG)
+               ifcvf_config_changed(irq, arg);
+
+       return ifcvf_vqs_reused_intr_handler(irq, arg);
+}
+
 static void ifcvf_free_irq_vectors(void *data)
 {
        pci_free_irq_vectors(data);
 }
 
-static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;
 
+       for (i = 0; i < vf->nr_vring; i++) {
+               if (vf->vring[i].irq != -EINVAL) {
+                       devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+                       vf->vring[i].irq = -EINVAL;
+               }
+       }
+}
 
-       for (i = 0; i < queues; i++) {
-               devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
-               vf->vring[i].irq = -EINVAL;
+static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->vqs_reused_irq != -EINVAL) {
+               devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
+               vf->vqs_reused_irq = -EINVAL;
        }
 
-       devm_free_irq(&pdev->dev, vf->config_irq, vf);
+}
+
+static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               ifcvf_free_per_vq_irq(adapter);
+       else
+               ifcvf_free_vqs_reused_irq(adapter);
+}
+
+static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->config_irq == -EINVAL)
+               return;
+
+       /* If the irq is shared by all vqs and the config interrupt,
+        * it is already freed in ifcvf_free_vq_irq, so here only
+        * need to free config irq when msix_vector_status != MSIX_VECTOR_DEV_SHARED
+        */
+       if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
+               devm_free_irq(&pdev->dev, vf->config_irq, vf);
+               vf->config_irq = -EINVAL;
+       }
+}
+
+static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+
+       ifcvf_free_vq_irq(adapter);
+       ifcvf_free_config_irq(adapter);
        ifcvf_free_irq_vectors(pdev);
 }
 
-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+/* ifcvf MSIX vectors allocator: this helper tries to allocate
+ * vectors for all virtqueues and the config interrupt.
+ * It returns the number of allocated vectors, or a negative
+ * value on failure.
+ */
+static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
-       int vector, i, ret, irq;
-       u16 max_intr;
+       int max_intr, ret;
 
        /* all queues and config interrupt  */
        max_intr = vf->nr_vring + 1;
+       ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 
-       ret = pci_alloc_irq_vectors(pdev, max_intr,
-                                   max_intr, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }
 
+       if (ret < max_intr)
+               IFCVF_INFO(pdev,
+                          "Requested %u vectors, however only %u allocated, lower performance\n",
+                          max_intr, ret);
+
+       return ret;
+}
+
+static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vf->vqs_reused_irq = -EINVAL;
+       for (i = 0; i < vf->nr_vring; i++) {
+               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
+               vector = i;
+               irq = pci_irq_vector(pdev, vector);
+               ret = devm_request_irq(&pdev->dev, irq,
+                                      ifcvf_vq_intr_handler, 0,
+                                      vf->vring[i].msix_name,
+                                      &vf->vring[i]);
+               if (ret) {
+                       IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
+                       goto err;
+               }
+
+               vf->vring[i].irq = irq;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+}
+
+static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vector = 0;
+       snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
+       irq = pci_irq_vector(pdev, vector);
+       ret = devm_request_irq(&pdev->dev, irq,
+                              ifcvf_vqs_reused_intr_handler, 0,
+                              vf->vring[0].msix_name, vf);
+       if (ret) {
+               IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
+               goto err;
+       }
+
+       vf->vqs_reused_irq = irq;
+       for (i = 0; i < vf->nr_vring; i++) {
+               vf->vring[i].irq = -EINVAL;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+}
+
+static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vector = 0;
+       snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
+       irq = pci_irq_vector(pdev, vector);
+       ret = devm_request_irq(&pdev->dev, irq,
+                              ifcvf_dev_intr_handler, 0,
+                              vf->vring[0].msix_name, vf);
+       if (ret) {
+               IFCVF_ERR(pdev, "Failed to request irq for the device\n");
+               goto err;
+       }
+
+       vf->vqs_reused_irq = irq;
+       for (i = 0; i < vf->nr_vring; i++) {
+               vf->vring[i].irq = -EINVAL;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       vf->config_irq = irq;
+       ret = ifcvf_set_config_vector(vf, vector);
+       if (ret == VIRTIO_MSI_NO_VECTOR) {
+               IFCVF_ERR(pdev, "No msix vector for device config\n");
+               goto err;
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+
+}
+
+static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+       int ret;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               ret = ifcvf_request_per_vq_irq(adapter);
+       else
+               ret = ifcvf_request_vqs_reused_irq(adapter);
+
+       return ret;
+}
+
+static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int config_vector, ret;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+               return 0;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+       /* vectors 0 ~ (vf->nr_vring - 1) for vqs, vector vf->nr_vring for the config interrupt */
+               config_vector = vf->nr_vring;
+
+       if (vf->msix_vector_status ==  MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+               /* vector 0 for vqs and 1 for config interrupt */
+               config_vector = 1;
+
        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
                 pci_name(pdev));
-       vector = 0;
-       vf->config_irq = pci_irq_vector(pdev, vector);
+       vf->config_irq = pci_irq_vector(pdev, config_vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
-               return ret;
+               goto err;
        }
 
-       for (i = 0; i < vf->nr_vring; i++) {
-               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
-                        pci_name(pdev), i);
-               vector = i + IFCVF_MSI_QUEUE_OFF;
-               irq = pci_irq_vector(pdev, vector);
-               ret = devm_request_irq(&pdev->dev, irq,
-                                      ifcvf_intr_handler, 0,
-                                      vf->vring[i].msix_name,
-                                      &vf->vring[i]);
-               if (ret) {
-                       IFCVF_ERR(pdev,
-                                 "Failed to request irq for vq %d\n", i);
-                       ifcvf_free_irq(adapter, i);
+       ret = ifcvf_set_config_vector(vf, config_vector);
+       if (ret == VIRTIO_MSI_NO_VECTOR) {
+               IFCVF_ERR(pdev, "No msix vector for device config\n");
+               goto err;
+       }
 
-                       return ret;
-               }
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
 
-               vf->vring[i].irq = irq;
+       return -EFAULT;
+}
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+       int nvectors, ret, max_intr;
+
+       nvectors = ifcvf_alloc_vectors(adapter);
+       if (nvectors <= 0)
+               return -EFAULT;
+
+       vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
+       max_intr = vf->nr_vring + 1;
+       if (nvectors < max_intr)
+               vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;
+
+       if (nvectors == 1) {
+               vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
+               ret = ifcvf_request_dev_irq(adapter);
+
+               return ret;
        }
 
+       ret = ifcvf_request_vq_irq(adapter);
+       if (ret)
+               return ret;
+
+       ret = ifcvf_request_config_irq(adapter);
+
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -263,7 +511,7 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
 
        if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
                ifcvf_stop_datapath(adapter);
-               ifcvf_free_irq(adapter, vf->nr_vring);
+               ifcvf_free_irq(adapter);
        }
 
        ifcvf_reset_vring(adapter);
@@ -348,7 +596,7 @@ static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return ioread8(&vf->common_cfg->config_generation);
+       return vp_ioread8(&vf->common_cfg->config_generation);
 }
 
 static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
@@ -410,7 +658,10 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return vf->vring[qid].irq;
+       if (vf->vqs_reused_irq < 0)
+               return vf->vring[qid].irq;
+       else
+               return -EINVAL;
 }
 
 static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
index d0f9107..2f4fb09 100644 (file)
@@ -1475,7 +1475,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
        virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
        struct mlx5_core_dev *pfmdev;
        size_t read;
-       u8 mac[ETH_ALEN];
+       u8 mac[ETH_ALEN], mac_back[ETH_ALEN];
 
        pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
        switch (cmd) {
@@ -1489,6 +1489,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
                        break;
                }
 
+               if (is_zero_ether_addr(mac))
+                       break;
+
                if (!is_zero_ether_addr(ndev->config.mac)) {
                        if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
                                mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
@@ -1503,7 +1506,47 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
                        break;
                }
 
+               /* Back up the original MAC address so that it can be
+                * restored if adding the forward rules fails
+                */
+               memcpy(mac_back, ndev->config.mac, ETH_ALEN);
+
                memcpy(ndev->config.mac, mac, ETH_ALEN);
+
+               /* The flow table entry needs to be recreated so that packets can be forwarded back
+                */
+               remove_fwd_to_tir(ndev);
+
+               if (add_fwd_to_tir(ndev)) {
+                       mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
+
+                       /* Although this path is unlikely to be taken, double-check anyway */
+                       if (is_zero_ether_addr(mac_back)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n");
+                               break;
+                       }
+
+                       /* Try to restore the original MAC address to the MPFS table,
+                        * and try to restore the forward rule entry.
+                        */
+                       if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n",
+                                              ndev->config.mac);
+                       }
+
+                       if (mlx5_mpfs_add_mac(pfmdev, mac_back)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n",
+                                              mac_back);
+                       }
+
+                       memcpy(ndev->config.mac, mac_back, ETH_ALEN);
+
+                       if (add_fwd_to_tir(ndev))
+                               mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
+
+                       break;
+               }
+
                status = VIRTIO_NET_OK;
                break;
 
@@ -1669,7 +1712,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
                return;
 
        if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
-               if (!mvdev->cvq.ready)
+               if (!mvdev->wq || !mvdev->cvq.ready)
                        return;
 
                wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
@@ -2565,6 +2608,28 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
        return ret;
 }
 
+static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *in;
+       int err;
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu,
+                mtu + MLX5V_ETH_HARD_MTU);
+       MLX5_SET(modify_nic_vport_context_in, in, opcode,
+                MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+
+       err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
+
+       kvfree(in);
+       return err;
+}
+
 static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
                             const struct vdpa_dev_set_config *add_config)
 {
@@ -2624,6 +2689,13 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        init_mvqs(ndev);
        mutex_init(&ndev->reslock);
        config = &ndev->config;
+
+       if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
+               err = config_func_mtu(mdev, add_config->net.mtu);
+               if (err)
+                       goto err_mtu;
+       }
+
        err = query_mtu(mdev, &mtu);
        if (err)
                goto err_mtu;
@@ -2707,9 +2779,12 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
        struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
        struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct workqueue_struct *wq;
 
        mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
-       destroy_workqueue(mvdev->wq);
+       wq = mvdev->wq;
+       mvdev->wq = NULL;
+       destroy_workqueue(wq);
        _vdpa_unregister_device(dev);
        mgtdev->ndev = NULL;
 }
@@ -2741,7 +2816,8 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        mgtdev->mgtdev.device = mdev->device;
        mgtdev->mgtdev.id_table = id_table;
        mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
-                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP) |
+                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
        mgtdev->mgtdev.max_supported_vqs =
                MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
        mgtdev->mgtdev.supported_features = get_supported_features(mdev);
index 1ea5254..2b75c00 100644 (file)
@@ -232,7 +232,7 @@ static int vdpa_name_match(struct device *dev, const void *data)
        return (strcmp(dev_name(&vdev->dev), data) == 0);
 }
 
-static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        struct device *dev;
 
@@ -257,7 +257,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
  *
  * Return: Returns an error when fail to add device to vDPA bus
  */
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        if (!vdev->mdev)
                return -EINVAL;
@@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device);
  *
  * Return: Returns an error when fail to add to vDPA bus
  */
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        int err;
 
index 40b0983..5829cf2 100644 (file)
@@ -62,8 +62,12 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
         */
        if (start == 0 && last == ULONG_MAX) {
                u64 mid = last / 2;
+               int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
+                               perm, opaque);
+
+               if (err)
+                       return err;
 
-               vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
                addr += mid + 1;
                start = mid + 1;
        }
index ec5249e..4c2f0bd 100644 (file)
@@ -42,7 +42,7 @@ struct vhost_vdpa {
        struct device dev;
        struct cdev cdev;
        atomic_t opened;
-       int nvqs;
+       u32 nvqs;
        int virtio_id;
        int minor;
        struct eventfd_ctx *config_ctx;
@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
                return;
 
        irq = ops->get_vq_irq(vdpa, qid);
+       if (irq < 0)
+               return;
+
        irq_bypass_unregister_producer(&vq->call_ctx.producer);
-       if (!vq->call_ctx.ctx || irq < 0)
+       if (!vq->call_ctx.ctx)
                return;
 
        vq->call_ctx.producer.token = vq->call_ctx.ctx;
@@ -158,7 +161,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u8 status, status_old;
-       int ret, nvqs = v->nvqs;
+       u32 nvqs = v->nvqs;
+       int ret;
        u16 i;
 
        if (copy_from_user(&status, statusp, sizeof(status)))
@@ -355,6 +359,30 @@ static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
        return 0;
 }
 
+static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+       u32 size;
+
+       size = ops->get_config_size(vdpa);
+
+       if (copy_to_user(argp, &size, sizeof(size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+
+       if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
+               return -EFAULT;
+
+       return 0;
+}
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
 {
@@ -492,6 +520,12 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        case VHOST_VDPA_GET_IOVA_RANGE:
                r = vhost_vdpa_get_iova_range(v, argp);
                break;
+       case VHOST_VDPA_GET_CONFIG_SIZE:
+               r = vhost_vdpa_get_config_size(v, argp);
+               break;
+       case VHOST_VDPA_GET_VQS_COUNT:
+               r = vhost_vdpa_get_vqs_count(v, argp);
+               break;
        default:
                r = vhost_dev_ioctl(&v->vdev, cmd, argp);
                if (r == -ENOIOCTLCMD)
@@ -948,7 +982,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
        struct vhost_vdpa *v;
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
-       int nvqs, i, r, opened;
+       int r, opened;
+       u32 i, nvqs;
 
        v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
 
@@ -1001,7 +1036,7 @@ err:
 
 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
 {
-       int i;
+       u32 i;
 
        for (i = 0; i < v->nvqs; i++)
                vhost_vdpa_unsetup_vq_irq(v, i);
index 1768362..d02173f 100644 (file)
@@ -2550,8 +2550,9 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                       &vq->avail->idx, r);
                return false;
        }
+       vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
-       return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
+       return vq->avail_idx != vq->last_avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_enable_notify);
 
index 0ae1a39..a1c467a 100644 (file)
@@ -78,6 +78,7 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
 }
 
 static const struct acpi_device_id vmgenid_ids[] = {
+       { "VMGENCTR", 0 },
        { "VM_GEN_COUNTER", 0 },
        { }
 };
index 492fc26..b5adf6a 100644 (file)
@@ -105,7 +105,7 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
        tristate "Virtio mem driver"
-       depends on X86_64
+       depends on X86_64 || ARM64
        depends on VIRTIO
        depends on MEMORY_HOTPLUG
        depends on MEMORY_HOTREMOVE
@@ -115,8 +115,9 @@ config VIRTIO_MEM
         This driver provides access to virtio-mem paravirtualized memory
         devices, allowing to hotplug and hotunplug memory.
 
-        This driver was only tested under x86-64, but should theoretically
-        work on all architectures that support memory hotplug and hotremove.
+        This driver was only tested under x86-64 and arm64, but should
+        theoretically work on all architectures that support memory hotplug
+        and hotremove.
 
         If unsure, say M.
 
index 22f15f4..75c8d56 100644 (file)
@@ -526,8 +526,9 @@ int virtio_device_restore(struct virtio_device *dev)
                        goto err;
        }
 
-       /* Finally, tell the device we're all set */
-       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+       /* If restore didn't do it, mark device DRIVER_OK ourselves. */
+       if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
+               virtio_device_ready(dev);
 
        virtio_config_enable(dev);
 
index fdbde1d..d724f67 100644 (file)
@@ -24,46 +24,17 @@ MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
 #endif
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev)
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;
 
-       if (vp_dev->intx_enabled) {
-               /*
-                * The below synchronize() guarantees that any
-                * interrupt for this line arriving after
-                * synchronize_irq() has completed is guaranteed to see
-                * intx_soft_enabled == false.
-                */
-               WRITE_ONCE(vp_dev->intx_soft_enabled, false);
+       if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);
-       }
-
-       for (i = 0; i < vp_dev->msix_vectors; ++i)
-               disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
-}
-
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev)
-{
-       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       int i;
-
-       if (vp_dev->intx_enabled) {
-               disable_irq(vp_dev->pci_dev->irq);
-               /*
-                * The above disable_irq() provides TSO ordering and
-                * as such promotes the below store to store-release.
-                */
-               WRITE_ONCE(vp_dev->intx_soft_enabled, true);
-               enable_irq(vp_dev->pci_dev->irq);
-               return;
-       }
 
        for (i = 0; i < vp_dev->msix_vectors; ++i)
-               enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
+               synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
 /* the notify function used when creating a virt queue */
@@ -113,9 +84,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;
 
-       if (!READ_ONCE(vp_dev->intx_soft_enabled))
-               return IRQ_NONE;
-
        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);
@@ -173,8 +141,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                         vp_config_changed, IRQF_NO_AUTOEN,
-                         vp_dev->msix_names[v],
+                         vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
@@ -193,8 +160,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                                 vp_vring_interrupt, IRQF_NO_AUTOEN,
-                                 vp_dev->msix_names[v],
+                                 vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
@@ -371,7 +337,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-                                 vring_interrupt, IRQF_NO_AUTOEN,
+                                 vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err)
index 23f6c5c..eb17a29 100644 (file)
@@ -63,7 +63,6 @@ struct virtio_pci_device {
        /* MSI-X support */
        int msix_enabled;
        int intx_enabled;
-       bool intx_soft_enabled;
        cpumask_var_t *msix_affinity_masks;
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
@@ -102,10 +101,8 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
        return container_of(vdev, struct virtio_pci_device, vdev);
 }
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev);
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev);
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev);
 /* the notify function used when creating a virt queue */
 bool vp_notify(struct virtqueue *vq);
 /* the config->del_vqs() implementation */
index 34141b9..6f4e34c 100644 (file)
@@ -98,8 +98,8 @@ static void vp_reset(struct virtio_device *vdev)
        /* Flush out the status write, and flush in device writes,
         * including MSi-X interrupts, if any. */
        vp_legacy_get_status(&vp_dev->ldev);
-       /* Disable VQ/configuration callbacks. */
-       vp_disable_cbs(vdev);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -185,7 +185,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
 }
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = vp_get,
        .set            = vp_set,
        .get_status     = vp_get_status,
index 5455bc0..a2671a2 100644 (file)
@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
         */
        while (vp_modern_get_status(mdev))
                msleep(1);
-       /* Disable VQ/configuration callbacks. */
-       vp_disable_cbs(vdev);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -293,7 +293,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
 
        for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
             pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
-               u8 type, cap_len, id;
+               u8 type, cap_len, id, res_bar;
                u32 tmp32;
                u64 res_offset, res_length;
 
@@ -315,9 +315,14 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
                if (id != required_id)
                        continue;
 
-               /* Type, and ID match, looks good */
                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
-                                                        bar), bar);
+                                                        bar), &res_bar);
+               if (res_bar >= PCI_STD_NUM_BARS)
+                       continue;
+
+               /* Type and ID match, and the BAR value isn't reserved.
+                * Looks good.
+                */
 
                /* Read the lower 32bit of length and offset */
                pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
@@ -337,6 +342,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
                                                     length_hi), &tmp32);
                res_length |= ((u64)tmp32) << 32;
 
+               *bar = res_bar;
                *offset = res_offset;
                *len = res_length;
 
@@ -380,7 +386,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
 }
 
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = NULL,
        .set            = NULL,
        .generation     = vp_generation,
@@ -398,7 +403,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = vp_get,
        .set            = vp_set,
        .generation     = vp_generation,
index e8b3ff2..591738a 100644 (file)
@@ -35,6 +35,13 @@ vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
        pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
                              &length);
 
+       /* Check if the BAR may have changed since we requested the region. */
+       if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
+               dev_err(&dev->dev,
+                       "virtio_pci: bar unexpectedly changed to %u\n", bar);
+               return NULL;
+       }
+
        if (length <= start) {
                dev_err(&dev->dev,
                        "virtio_pci: bad capability len %u (>%u expected)\n",
@@ -120,7 +127,7 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
                                     &bar);
 
                /* Ignore structures with reserved BAR values */
-               if (bar > 0x5)
+               if (bar >= PCI_STD_NUM_BARS)
                        continue;
 
                if (type == cfg_type) {
index 962f147..cfb028c 100644 (file)
@@ -379,19 +379,11 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 
        flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
 
-       if (flags & VRING_DESC_F_INDIRECT) {
-               dma_unmap_single(vring_dma_dev(vq),
-                                virtio64_to_cpu(vq->vq.vdev, desc->addr),
-                                virtio32_to_cpu(vq->vq.vdev, desc->len),
-                                (flags & VRING_DESC_F_WRITE) ?
-                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       } else {
-               dma_unmap_page(vring_dma_dev(vq),
-                              virtio64_to_cpu(vq->vq.vdev, desc->addr),
-                              virtio32_to_cpu(vq->vq.vdev, desc->len),
-                              (flags & VRING_DESC_F_WRITE) ?
-                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       }
+       dma_unmap_page(vring_dma_dev(vq),
+                      virtio64_to_cpu(vq->vq.vdev, desc->addr),
+                      virtio32_to_cpu(vq->vq.vdev, desc->len),
+                      (flags & VRING_DESC_F_WRITE) ?
+                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
@@ -984,24 +976,24 @@ static struct virtqueue *vring_create_virtqueue_split(
  * Packed ring specific functions - *_packed().
  */
 
-static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
-                                    struct vring_desc_extra *state)
+static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
+                                    struct vring_desc_extra *extra)
 {
        u16 flags;
 
        if (!vq->use_dma_api)
                return;
 
-       flags = state->flags;
+       flags = extra->flags;
 
        if (flags & VRING_DESC_F_INDIRECT) {
                dma_unmap_single(vring_dma_dev(vq),
-                                state->addr, state->len,
+                                extra->addr, extra->len,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
                dma_unmap_page(vring_dma_dev(vq),
-                              state->addr, state->len,
+                              extra->addr, extra->len,
                               (flags & VRING_DESC_F_WRITE) ?
                               DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
@@ -1017,19 +1009,11 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 
        flags = le16_to_cpu(desc->flags);
 
-       if (flags & VRING_DESC_F_INDIRECT) {
-               dma_unmap_single(vring_dma_dev(vq),
-                                le64_to_cpu(desc->addr),
-                                le32_to_cpu(desc->len),
-                                (flags & VRING_DESC_F_WRITE) ?
-                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       } else {
-               dma_unmap_page(vring_dma_dev(vq),
-                              le64_to_cpu(desc->addr),
-                              le32_to_cpu(desc->len),
-                              (flags & VRING_DESC_F_WRITE) ?
-                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       }
+       dma_unmap_page(vring_dma_dev(vq),
+                      le64_to_cpu(desc->addr),
+                      le32_to_cpu(desc->len),
+                      (flags & VRING_DESC_F_WRITE) ?
+                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -1303,8 +1287,7 @@ unmap_release:
        for (n = 0; n < total_sg; n++) {
                if (i == err_idx)
                        break;
-               vring_unmap_state_packed(vq,
-                                        &vq->packed.desc_extra[curr]);
+               vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
                curr = vq->packed.desc_extra[curr].next;
                i++;
                if (i >= vq->packed.vring.num)
@@ -1383,8 +1366,8 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
        if (unlikely(vq->use_dma_api)) {
                curr = id;
                for (i = 0; i < state->num; i++) {
-                       vring_unmap_state_packed(vq,
-                               &vq->packed.desc_extra[curr]);
+                       vring_unmap_extra_packed(vq,
+                                                &vq->packed.desc_extra[curr]);
                        curr = vq->packed.desc_extra[curr].next;
                }
        }
index 085f5a4..c4e82a8 100644 (file)
@@ -1779,7 +1779,7 @@ config BCM7038_WDT
        tristate "BCM63xx/BCM7038 Watchdog"
        select WATCHDOG_CORE
        depends on HAS_IOMEM
-       depends on ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
+       depends on ARCH_BCM4908 || ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
        help
          Watchdog driver for the built-in hardware in Broadcom 7038 and
          later SoCs used in set-top boxes.  BCM7038 was made public
index 436571b..bd06622 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/watchdog.h>
 
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+                               __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
 struct aspeed_wdt {
        struct watchdog_device  wdd;
        void __iomem            *base;
@@ -266,6 +271,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
        wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
        watchdog_init_timeout(&wdt->wdd, 0, dev);
 
+       watchdog_set_nowayout(&wdt->wdd, nowayout);
+
        np = dev->of_node;
 
        ofdid = of_match_node(aspeed_wdt_of_table, np);
index 51bfb79..d0c5d47 100644 (file)
@@ -66,6 +66,7 @@ struct imx2_wdt_device {
        struct watchdog_device wdog;
        bool ext_reset;
        bool clk_is_on;
+       bool no_ping;
 };
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -312,12 +313,18 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 
        wdev->ext_reset = of_property_read_bool(dev->of_node,
                                                "fsl,ext-reset-output");
+       /*
+        * The i.MX7D doesn't support low power mode, so we need to ping the watchdog
+        * during suspend.
+        */
+       wdev->no_ping = !of_device_is_compatible(dev->of_node, "fsl,imx7d-wdt");
        platform_set_drvdata(pdev, wdog);
        watchdog_set_drvdata(wdog, wdev);
        watchdog_set_nowayout(wdog, nowayout);
        watchdog_set_restart_priority(wdog, 128);
        watchdog_init_timeout(wdog, timeout, dev);
-       watchdog_stop_ping_on_suspend(wdog);
+       if (wdev->no_ping)
+               watchdog_stop_ping_on_suspend(wdog);
 
        if (imx2_wdt_is_running(wdev)) {
                imx2_wdt_set_timeout(wdog, wdog->timeout);
@@ -366,9 +373,11 @@ static int __maybe_unused imx2_wdt_suspend(struct device *dev)
                imx2_wdt_ping(wdog);
        }
 
-       clk_disable_unprepare(wdev->clk);
+       if (wdev->no_ping) {
+               clk_disable_unprepare(wdev->clk);
 
-       wdev->clk_is_on = false;
+               wdev->clk_is_on = false;
+       }
 
        return 0;
 }
@@ -380,11 +389,14 @@ static int __maybe_unused imx2_wdt_resume(struct device *dev)
        struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
        int ret;
 
-       ret = clk_prepare_enable(wdev->clk);
-       if (ret)
-               return ret;
+       if (wdev->no_ping) {
+               ret = clk_prepare_enable(wdev->clk);
 
-       wdev->clk_is_on = true;
+               if (ret)
+                       return ret;
+
+               wdev->clk_is_on = true;
+       }
 
        if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
                /*
@@ -407,6 +419,7 @@ static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
 
 static const struct of_device_id imx2_wdt_dt_ids[] = {
        { .compatible = "fsl,imx21-wdt", },
+       { .compatible = "fsl,imx7d-wdt", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
index 31b03fa..281a48d 100644 (file)
@@ -84,10 +84,24 @@ static int ixp4xx_wdt_set_timeout(struct watchdog_device *wdd,
        return 0;
 }
 
+static int ixp4xx_wdt_restart(struct watchdog_device *wdd,
+                              unsigned long action, void *data)
+{
+       struct ixp4xx_wdt *iwdt = to_ixp4xx_wdt(wdd);
+
+       __raw_writel(IXP4XX_WDT_KEY, iwdt->base + IXP4XX_OSWK_OFFSET);
+       __raw_writel(0, iwdt->base + IXP4XX_OSWT_OFFSET);
+       __raw_writel(IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE,
+                    iwdt->base + IXP4XX_OSWE_OFFSET);
+
+       return 0;
+}
+
 static const struct watchdog_ops ixp4xx_wdt_ops = {
        .start = ixp4xx_wdt_start,
        .stop = ixp4xx_wdt_stop,
        .set_timeout = ixp4xx_wdt_set_timeout,
+       .restart = ixp4xx_wdt_restart,
        .owner = THIS_MODULE,
 };
 
index 127eefc..e25e6bf 100644 (file)
@@ -238,8 +238,10 @@ static int armada370_start(struct watchdog_device *wdt_dev)
        atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
 
        /* Enable watchdog timer */
-       atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
-                                               dev->data->wdt_enable_bit);
+       reg = dev->data->wdt_enable_bit;
+       if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
+               reg |= TIMER1_ENABLE_BIT;
+       atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg);
 
        /* Enable reset on watchdog */
        reg = readl(dev->rstout);
@@ -312,7 +314,7 @@ static int armada375_stop(struct watchdog_device *wdt_dev)
 static int armada370_stop(struct watchdog_device *wdt_dev)
 {
        struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
-       u32 reg;
+       u32 reg, mask;
 
        /* Disable reset on watchdog */
        reg = readl(dev->rstout);
@@ -320,7 +322,10 @@ static int armada370_stop(struct watchdog_device *wdt_dev)
        writel(reg, dev->rstout);
 
        /* Disable watchdog timer */
-       atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
+       mask = dev->data->wdt_enable_bit;
+       if (wdt_dev->info->options & WDIOF_PRETIMEOUT)
+               mask |= TIMER1_ENABLE_BIT;
+       atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0);
 
        return 0;
 }
index 5791198..41d58ea 100644 (file)
@@ -327,6 +327,7 @@ static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume);
 static const struct of_device_id rwdt_ids[] = {
        { .compatible = "renesas,rcar-gen2-wdt", },
        { .compatible = "renesas,rcar-gen3-wdt", },
+       { .compatible = "renesas,rcar-gen4-wdt", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, rwdt_ids);
index 117bc2a..db843f8 100644 (file)
@@ -228,6 +228,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(dev);
        if (ret) {
                pm_runtime_put_noidle(dev);
+               pm_runtime_disable(&pdev->dev);
                return dev_err_probe(dev, ret, "runtime pm failed\n");
        }
 
index dd9a744..86ffb58 100644 (file)
@@ -49,7 +49,7 @@
 /* internal variables */
 
 enum tco_reg_layout {
-       sp5100, sb800, efch
+       sp5100, sb800, efch, efch_mmio
 };
 
 struct sp5100_tco {
@@ -86,6 +86,10 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
            dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
            dev->revision < 0x40) {
                return sp5100;
+       } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
+           sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+           sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
+               return efch_mmio;
        } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
            ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
             dev->revision >= 0x41) ||
@@ -209,6 +213,8 @@ static void tco_timer_enable(struct sp5100_tco *tco)
                                          ~EFCH_PM_WATCHDOG_DISABLE,
                                          EFCH_PM_DECODEEN_SECOND_RES);
                break;
+       default:
+               break;
        }
 }
 
@@ -223,14 +229,195 @@ static u32 sp5100_tco_read_pm_reg32(u8 index)
        return val;
 }
 
+static u32 sp5100_tco_request_region(struct device *dev,
+                                    u32 mmio_addr,
+                                    const char *dev_name)
+{
+       if (!devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
+                                    dev_name)) {
+               dev_dbg(dev, "MMIO address 0x%08x already in use\n", mmio_addr);
+               return 0;
+       }
+
+       return mmio_addr;
+}
+
+static u32 sp5100_tco_prepare_base(struct sp5100_tco *tco,
+                                  u32 mmio_addr,
+                                  u32 alt_mmio_addr,
+                                  const char *dev_name)
+{
+       struct device *dev = tco->wdd.parent;
+
+       dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n", mmio_addr);
+
+       if (!mmio_addr && !alt_mmio_addr)
+               return -ENODEV;
+
+       /* Check for MMIO address and alternate MMIO address conflicts */
+       if (mmio_addr)
+               mmio_addr = sp5100_tco_request_region(dev, mmio_addr, dev_name);
+
+       if (!mmio_addr && alt_mmio_addr)
+               mmio_addr = sp5100_tco_request_region(dev, alt_mmio_addr, dev_name);
+
+       if (!mmio_addr) {
+               dev_err(dev, "Failed to reserve MMIO or alternate MMIO region\n");
+               return -EBUSY;
+       }
+
+       tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+       if (!tco->tcobase) {
+               dev_err(dev, "MMIO address 0x%08x failed mapping\n", mmio_addr);
+               devm_release_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+               return -ENOMEM;
+       }
+
+       dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
+
+       return 0;
+}
+
+static int sp5100_tco_timer_init(struct sp5100_tco *tco)
+{
+       struct watchdog_device *wdd = &tco->wdd;
+       struct device *dev = wdd->parent;
+       u32 val;
+
+       val = readl(SP5100_WDT_CONTROL(tco->tcobase));
+       if (val & SP5100_WDT_DISABLED) {
+               dev_err(dev, "Watchdog hardware is disabled\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Save WatchDogFired status, because WatchDogFired flag is
+        * cleared here.
+        */
+       if (val & SP5100_WDT_FIRED)
+               wdd->bootstatus = WDIOF_CARDRESET;
+
+       /* Set watchdog action to reset the system */
+       val &= ~SP5100_WDT_ACTION_RESET;
+       writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+
+       /* Set a reasonable heartbeat before we stop the timer */
+       tco_timer_set_timeout(wdd, wdd->timeout);
+
+       /*
+        * Stop the TCO before we change anything so we don't race with
+        * a zeroed timer.
+        */
+       tco_timer_stop(wdd);
+
+       return 0;
+}
+
+static u8 efch_read_pm_reg8(void __iomem *addr, u8 index)
+{
+       return readb(addr + index);
+}
+
+static void efch_update_pm_reg8(void __iomem *addr, u8 index, u8 reset, u8 set)
+{
+       u8 val;
+
+       val = readb(addr + index);
+       val &= reset;
+       val |= set;
+       writeb(val, addr + index);
+}
+
+static void tco_timer_enable_mmio(void __iomem *addr)
+{
+       efch_update_pm_reg8(addr, EFCH_PM_DECODEEN3,
+                           ~EFCH_PM_WATCHDOG_DISABLE,
+                           EFCH_PM_DECODEEN_SECOND_RES);
+}
+
+static int sp5100_tco_setupdevice_mmio(struct device *dev,
+                                      struct watchdog_device *wdd)
+{
+       struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
+       const char *dev_name = SB800_DEVNAME;
+       u32 mmio_addr = 0, alt_mmio_addr = 0;
+       struct resource *res;
+       void __iomem *addr;
+       int ret;
+       u32 val;
+
+       res = request_mem_region_muxed(EFCH_PM_ACPI_MMIO_PM_ADDR,
+                                      EFCH_PM_ACPI_MMIO_PM_SIZE,
+                                      "sp5100_tco");
+
+       if (!res) {
+               dev_err(dev,
+                       "Memory region 0x%08x already in use\n",
+                       EFCH_PM_ACPI_MMIO_PM_ADDR);
+               return -EBUSY;
+       }
+
+       addr = ioremap(EFCH_PM_ACPI_MMIO_PM_ADDR, EFCH_PM_ACPI_MMIO_PM_SIZE);
+       if (!addr) {
+               dev_err(dev, "Address mapping failed\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /*
+        * EFCH_PM_DECODEEN_WDT_TMREN is dual purpose. This bitfield
+        * enables sp5100_tco register MMIO space decoding. The bitfield
+        * also starts the timer operation. Enable if not already enabled.
+        */
+       val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+               efch_update_pm_reg8(addr, EFCH_PM_DECODEEN, 0xff,
+                                   EFCH_PM_DECODEEN_WDT_TMREN);
+       }
+
+       /* Error if the timer could not be enabled */
+       val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+               dev_err(dev, "Failed to enable the timer\n");
+               ret = -EFAULT;
+               goto out;
+       }
+
+       mmio_addr = EFCH_PM_WDT_ADDR;
+
+       /* Determine alternate MMIO base address */
+       val = efch_read_pm_reg8(addr, EFCH_PM_ISACONTROL);
+       if (val & EFCH_PM_ISACONTROL_MMIOEN)
+               alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+                       EFCH_PM_ACPI_MMIO_WDT_OFFSET;
+
+       ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+       if (!ret) {
+               tco_timer_enable_mmio(addr);
+               ret = sp5100_tco_timer_init(tco);
+       }
+
+out:
+       if (addr)
+               iounmap(addr);
+
+       release_resource(res);
+
+       return ret;
+}
+
 static int sp5100_tco_setupdevice(struct device *dev,
                                  struct watchdog_device *wdd)
 {
        struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
        const char *dev_name;
        u32 mmio_addr = 0, val;
+       u32 alt_mmio_addr = 0;
        int ret;
 
+       if (tco->tco_reg_layout == efch_mmio)
+               return sp5100_tco_setupdevice_mmio(dev, wdd);
+
        /* Request the IO ports used by this driver */
        if (!request_muxed_region(SP5100_IO_PM_INDEX_REG,
                                  SP5100_PM_IOPORTS_SIZE, "sp5100_tco")) {
@@ -247,138 +434,55 @@ static int sp5100_tco_setupdevice(struct device *dev,
                dev_name = SP5100_DEVNAME;
                mmio_addr = sp5100_tco_read_pm_reg32(SP5100_PM_WATCHDOG_BASE) &
                                                                0xfffffff8;
+
+               /*
+                * Secondly, find the watchdog timer MMIO address
+                * from SBResource_MMIO register.
+                */
+
+               /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+               pci_read_config_dword(sp5100_tco_pci,
+                                     SP5100_SB_RESOURCE_MMIO_BASE,
+                                     &val);
+
+               /* Verify MMIO is enabled and using bar0 */
+               if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+                       alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
                break;
        case sb800:
                dev_name = SB800_DEVNAME;
                mmio_addr = sp5100_tco_read_pm_reg32(SB800_PM_WATCHDOG_BASE) &
                                                                0xfffffff8;
+
+               /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
+               val = sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
+
+               /* Verify MMIO is enabled and using bar0 */
+               if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+                       alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
                break;
        case efch:
                dev_name = SB800_DEVNAME;
-               /*
-                * On Family 17h devices, the EFCH_PM_DECODEEN_WDT_TMREN bit of
-                * EFCH_PM_DECODEEN not only enables the EFCH_PM_WDT_ADDR memory
-                * region, it also enables the watchdog itself.
-                */
-               if (boot_cpu_data.x86 == 0x17) {
-                       val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
-                       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
-                               sp5100_tco_update_pm_reg8(EFCH_PM_DECODEEN, 0xff,
-                                                         EFCH_PM_DECODEEN_WDT_TMREN);
-                       }
-               }
                val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
                if (val & EFCH_PM_DECODEEN_WDT_TMREN)
                        mmio_addr = EFCH_PM_WDT_ADDR;
+
+               val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
+               if (val & EFCH_PM_ISACONTROL_MMIOEN)
+                       alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+                               EFCH_PM_ACPI_MMIO_WDT_OFFSET;
                break;
        default:
                return -ENODEV;
        }
 
-       /* Check MMIO address conflict */
-       if (!mmio_addr ||
-           !devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
-                                    dev_name)) {
-               if (mmio_addr)
-                       dev_dbg(dev, "MMIO address 0x%08x already in use\n",
-                               mmio_addr);
-               switch (tco->tco_reg_layout) {
-               case sp5100:
-                       /*
-                        * Secondly, Find the watchdog timer MMIO address
-                        * from SBResource_MMIO register.
-                        */
-                       /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
-                       pci_read_config_dword(sp5100_tco_pci,
-                                             SP5100_SB_RESOURCE_MMIO_BASE,
-                                             &mmio_addr);
-                       if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
-                                         SB800_ACPI_MMIO_SEL)) !=
-                                                 SB800_ACPI_MMIO_DECODE_EN) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr &= ~0xFFF;
-                       mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
-                       break;
-               case sb800:
-                       /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
-                       mmio_addr =
-                               sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
-                       if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
-                                         SB800_ACPI_MMIO_SEL)) !=
-                                                 SB800_ACPI_MMIO_DECODE_EN) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr &= ~0xFFF;
-                       mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
-                       break;
-               case efch:
-                       val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
-                       if (!(val & EFCH_PM_ISACONTROL_MMIOEN)) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
-                                   EFCH_PM_ACPI_MMIO_WDT_OFFSET;
-                       break;
-               }
-               dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n",
-                       mmio_addr);
-               if (!devm_request_mem_region(dev, mmio_addr,
-                                            SP5100_WDT_MEM_MAP_SIZE,
-                                            dev_name)) {
-                       dev_dbg(dev, "MMIO address 0x%08x already in use\n",
-                               mmio_addr);
-                       ret = -EBUSY;
-                       goto unreg_region;
-               }
-       }
-
-       tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
-       if (!tco->tcobase) {
-               dev_err(dev, "failed to get tcobase address\n");
-               ret = -ENOMEM;
-               goto unreg_region;
-       }
-
-       dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
-
-       /* Setup the watchdog timer */
-       tco_timer_enable(tco);
-
-       val = readl(SP5100_WDT_CONTROL(tco->tcobase));
-       if (val & SP5100_WDT_DISABLED) {
-               dev_err(dev, "Watchdog hardware is disabled\n");
-               ret = -ENODEV;
-               goto unreg_region;
+       ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+       if (!ret) {
+               /* Setup the watchdog timer */
+               tco_timer_enable(tco);
+               ret = sp5100_tco_timer_init(tco);
        }
 
-       /*
-        * Save WatchDogFired status, because WatchDogFired flag is
-        * cleared here.
-        */
-       if (val & SP5100_WDT_FIRED)
-               wdd->bootstatus = WDIOF_CARDRESET;
-       /* Set watchdog action to reset the system */
-       val &= ~SP5100_WDT_ACTION_RESET;
-       writel(val, SP5100_WDT_CONTROL(tco->tcobase));
-
-       /* Set a reasonable heartbeat before we stop the timer */
-       tco_timer_set_timeout(wdd, wdd->timeout);
-
-       /*
-        * Stop the TCO before we change anything so we don't race with
-        * a zeroed timer.
-        */
-       tco_timer_stop(wdd);
-
-       release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
-
-       return 0;
-
-unreg_region:
        release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
        return ret;
 }
index adf015a..6a0986d 100644 (file)
@@ -58,6 +58,7 @@
 #define SB800_PM_WATCHDOG_SECOND_RES   GENMASK(1, 0)
 #define SB800_ACPI_MMIO_DECODE_EN      BIT(0)
 #define SB800_ACPI_MMIO_SEL            BIT(1)
+#define SB800_ACPI_MMIO_MASK           GENMASK(1, 0)
 
 #define SB800_PM_WDT_MMIO_OFFSET       0xB00
 
 #define EFCH_PM_ISACONTROL_MMIOEN      BIT(1)
 
 #define EFCH_PM_ACPI_MMIO_ADDR         0xfed80000
+#define EFCH_PM_ACPI_MMIO_PM_OFFSET    0x00000300
 #define EFCH_PM_ACPI_MMIO_WDT_OFFSET   0x00000b00
+
+#define EFCH_PM_ACPI_MMIO_PM_ADDR      (EFCH_PM_ACPI_MMIO_ADDR +       \
+                                        EFCH_PM_ACPI_MMIO_PM_OFFSET)
+#define EFCH_PM_ACPI_MMIO_PM_SIZE      8
+#define AMD_ZEN_SMBUS_PCI_REV          0x51
index 3a3d8b5..54903f3 100644 (file)
@@ -171,17 +171,17 @@ static int __watchdog_ping(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_ping: ping the watchdog.
- *     @wdd: the watchdog device to ping
+ * watchdog_ping - ping the watchdog
+ * @wdd: The watchdog device to ping
  *
- *     The caller must hold wd_data->lock.
+ * If the watchdog has no own ping operation then it needs to be
+ * restarted via the start operation. This wrapper function does
+ * exactly that.
+ * We only ping when the watchdog device is running.
+ * The caller must hold wd_data->lock.
  *
- *     If the watchdog has no own ping operation then it needs to be
- *     restarted via the start operation. This wrapper function does
- *     exactly that.
- *     We only ping when the watchdog device is running.
+ * Return: 0 on success, error otherwise.
  */
-
 static int watchdog_ping(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -231,16 +231,14 @@ static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
 }
 
 /*
- *     watchdog_start: wrapper to start the watchdog.
- *     @wdd: the watchdog device to start
+ * watchdog_start - wrapper to start the watchdog
+ * @wdd: The watchdog device to start
  *
- *     The caller must hold wd_data->lock.
+ * Start the watchdog if it is not active and mark it active.
+ * The caller must hold wd_data->lock.
  *
- *     Start the watchdog if it is not active and mark it active.
- *     This function returns zero on success or a negative errno code for
- *     failure.
+ * Return: 0 on success or a negative errno code for failure.
  */
-
 static int watchdog_start(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -274,17 +272,15 @@ static int watchdog_start(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_stop: wrapper to stop the watchdog.
- *     @wdd: the watchdog device to stop
+ * watchdog_stop - wrapper to stop the watchdog
+ * @wdd: The watchdog device to stop
  *
- *     The caller must hold wd_data->lock.
+ * Stop the watchdog if it is still active and unmark it active.
+ * If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ * The caller must hold wd_data->lock.
  *
- *     Stop the watchdog if it is still active and unmark it active.
- *     This function returns zero on success or a negative errno code for
- *     failure.
- *     If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ * Return: 0 on success or a negative errno code for failure.
  */
-
 static int watchdog_stop(struct watchdog_device *wdd)
 {
        int err = 0;
@@ -315,14 +311,14 @@ static int watchdog_stop(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_get_status: wrapper to get the watchdog status
- *     @wdd: the watchdog device to get the status from
+ * watchdog_get_status - wrapper to get the watchdog status
+ * @wdd: The watchdog device to get the status from
  *
- *     The caller must hold wd_data->lock.
+ * Get the watchdog's status flags.
+ * The caller must hold wd_data->lock.
  *
- *     Get the watchdog's status flags.
+ * Return: watchdog's status flags.
  */
-
 static unsigned int watchdog_get_status(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -352,13 +348,14 @@ static unsigned int watchdog_get_status(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_set_timeout: set the watchdog timer timeout
- *     @wdd: the watchdog device to set the timeout for
- *     @timeout: timeout to set in seconds
+ * watchdog_set_timeout - set the watchdog timer timeout
+ * @wdd:       The watchdog device to set the timeout for
+ * @timeout:   Timeout to set in seconds
+ *
+ * The caller must hold wd_data->lock.
  *
- *     The caller must hold wd_data->lock.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_set_timeout(struct watchdog_device *wdd,
                                                        unsigned int timeout)
 {
@@ -385,11 +382,12 @@ static int watchdog_set_timeout(struct watchdog_device *wdd,
 }
 
 /*
- *     watchdog_set_pretimeout: set the watchdog timer pretimeout
- *     @wdd: the watchdog device to set the timeout for
- *     @timeout: pretimeout to set in seconds
+ * watchdog_set_pretimeout - set the watchdog timer pretimeout
+ * @wdd:       The watchdog device to set the timeout for
+ * @timeout:   pretimeout to set in seconds
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_set_pretimeout(struct watchdog_device *wdd,
                                   unsigned int timeout)
 {
@@ -410,15 +408,15 @@ static int watchdog_set_pretimeout(struct watchdog_device *wdd,
 }
 
 /*
- *     watchdog_get_timeleft: wrapper to get the time left before a reboot
- *     @wdd: the watchdog device to get the remaining time from
- *     @timeleft: the time that's left
+ * watchdog_get_timeleft - wrapper to get the time left before a reboot
+ * @wdd:       The watchdog device to get the remaining time from
+ * @timeleft:  The time that's left
  *
- *     The caller must hold wd_data->lock.
+ * Get the time before a watchdog will reboot (if not pinged).
+ * The caller must hold wd_data->lock.
  *
- *     Get the time before a watchdog will reboot (if not pinged).
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_get_timeleft(struct watchdog_device *wdd,
                                                        unsigned int *timeleft)
 {
@@ -635,14 +633,15 @@ __ATTRIBUTE_GROUPS(wdt);
 #endif
 
 /*
- *     watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
- *     @wdd: the watchdog device to do the ioctl on
- *     @cmd: watchdog command
- *     @arg: argument pointer
+ * watchdog_ioctl_op - call the watchdog drivers ioctl op if defined
+ * @wdd: The watchdog device to do the ioctl on
+ * @cmd: Watchdog command
+ * @arg: Argument pointer
  *
- *     The caller must hold wd_data->lock.
+ * The caller must hold wd_data->lock.
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
                                                        unsigned long arg)
 {
@@ -653,17 +652,18 @@ static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
 }
 
 /*
- *     watchdog_write: writes to the watchdog.
- *     @file: file from VFS
- *     @data: user address of data
- *     @len: length of data
- *     @ppos: pointer to the file offset
+ * watchdog_write - writes to the watchdog
+ * @file:      File from VFS
+ * @data:      User address of data
+ * @len:       Length of data
+ * @ppos:      Pointer to the file offset
  *
- *     A write to a watchdog device is defined as a keepalive ping.
- *     Writing the magic 'V' sequence allows the next close to turn
- *     off the watchdog (if 'nowayout' is not set).
+ * A write to a watchdog device is defined as a keepalive ping.
+ * Writing the magic 'V' sequence allows the next close to turn
+ * off the watchdog (if 'nowayout' is not set).
+ *
+ * Return: @len if successful, error otherwise.
  */
-
 static ssize_t watchdog_write(struct file *file, const char __user *data,
                                                size_t len, loff_t *ppos)
 {
@@ -706,13 +706,15 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
 }
 
 /*
- *     watchdog_ioctl: handle the different ioctl's for the watchdog device.
- *     @file: file handle to the device
- *     @cmd: watchdog command
- *     @arg: argument pointer
+ * watchdog_ioctl - handle the different ioctl's for the watchdog device
+ * @file:      File handle to the device
+ * @cmd:       Watchdog command
+ * @arg:       Argument pointer
  *
- *     The watchdog API defines a common set of functions for all watchdogs
- *     according to their available features.
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ *
+ * Return: 0 if successful, error otherwise.
  */
 
 static long watchdog_ioctl(struct file *file, unsigned int cmd,
@@ -819,15 +821,16 @@ out_ioctl:
 }
 
 /*
- *     watchdog_open: open the /dev/watchdog* devices.
- *     @inode: inode of device
- *     @file: file handle to device
+ * watchdog_open - open the /dev/watchdog* devices
+ * @inode:     Inode of device
+ * @file:      File handle to device
+ *
+ * When the /dev/watchdog* device gets opened, we start the watchdog.
+ * Watch out: the /dev/watchdog device is single open, so we make sure
+ * it can only be opened once.
  *
- *     When the /dev/watchdog* device gets opened, we start the watchdog.
- *     Watch out: the /dev/watchdog device is single open, so we make sure
- *     it can only be opened once.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_open(struct inode *inode, struct file *file)
 {
        struct watchdog_core_data *wd_data;
@@ -896,15 +899,16 @@ static void watchdog_core_data_release(struct device *dev)
 }
 
 /*
- *     watchdog_release: release the watchdog device.
- *     @inode: inode of device
- *     @file: file handle to device
+ * watchdog_release - release the watchdog device
+ * @inode:     Inode of device
+ * @file:      File handle to device
+ *
+ * This is the code for when /dev/watchdog gets closed. We will only
+ * stop the watchdog when we have received the magic char (and nowayout
+ * was not set), else the watchdog will keep running.
  *
- *     This is the code for when /dev/watchdog gets closed. We will only
- *     stop the watchdog when we have received the magic char (and nowayout
- *     was not set), else the watchdog will keep running.
+ * Always returns 0.
  */
-
 static int watchdog_release(struct inode *inode, struct file *file)
 {
        struct watchdog_core_data *wd_data = file->private_data;
@@ -977,14 +981,15 @@ static struct class watchdog_class = {
 };
 
 /*
- *     watchdog_cdev_register: register watchdog character device
- *     @wdd: watchdog device
+ * watchdog_cdev_register - register watchdog character device
+ * @wdd: Watchdog device
+ *
+ * Register a watchdog character device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
  *
- *     Register a watchdog character device including handling the legacy
- *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
- *     thus we set it up like that.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_cdev_register(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data;
@@ -1074,13 +1079,12 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_cdev_unregister: unregister watchdog character device
- *     @watchdog: watchdog device
+ * watchdog_cdev_unregister - unregister watchdog character device
+ * @wdd: Watchdog device
  *
- *     Unregister watchdog character device and if needed the legacy
- *     /dev/watchdog device.
+ * Unregister watchdog character device and if needed the legacy
+ * /dev/watchdog device.
  */
-
 static void watchdog_cdev_unregister(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -1109,15 +1113,16 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
        put_device(&wd_data->dev);
 }
 
-/*
- *     watchdog_dev_register: register a watchdog device
- *     @wdd: watchdog device
+/**
+ * watchdog_dev_register - register a watchdog device
+ * @wdd: Watchdog device
+ *
+ * Register a watchdog device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
  *
- *     Register a watchdog device including handling the legacy
- *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
- *     thus we set it up like that.
+ * Return: 0 if successful, error otherwise.
  */
-
 int watchdog_dev_register(struct watchdog_device *wdd)
 {
        int ret;
@@ -1133,30 +1138,31 @@ int watchdog_dev_register(struct watchdog_device *wdd)
        return ret;
 }
 
-/*
- *     watchdog_dev_unregister: unregister a watchdog device
- *     @watchdog: watchdog device
+/**
+ * watchdog_dev_unregister - unregister a watchdog device
+ * @wdd: watchdog device
  *
- *     Unregister watchdog device and if needed the legacy
- *     /dev/watchdog device.
+ * Unregister watchdog device and if needed the legacy
+ * /dev/watchdog device.
  */
-
 void watchdog_dev_unregister(struct watchdog_device *wdd)
 {
        watchdog_unregister_pretimeout(wdd);
        watchdog_cdev_unregister(wdd);
 }
 
-/*
- *     watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog
- *     @wdd: watchdog device
- *     @last_ping_ms: time since last HW heartbeat
+/**
+ * watchdog_set_last_hw_keepalive - set last HW keepalive time for watchdog
+ * @wdd:               Watchdog device
+ * @last_ping_ms:      Time since last HW heartbeat
  *
- *     Adjusts the last known HW keepalive time for a watchdog timer.
- *     This is needed if the watchdog is already running when the probe
- *     function is called, and it can't be pinged immediately. This
- *     function must be called immediately after watchdog registration,
- *     and min_hw_heartbeat_ms must be set for this to be useful.
+ * Adjusts the last known HW keepalive time for a watchdog timer.
+ * This is needed if the watchdog is already running when the probe
+ * function is called, and it can't be pinged immediately. This
+ * function must be called immediately after watchdog registration,
+ * and min_hw_heartbeat_ms must be set for this to be useful.
+ *
+ * Return: 0 if successful, error otherwise.
  */
 int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
                                   unsigned int last_ping_ms)
@@ -1180,12 +1186,13 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
 }
 EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
 
-/*
- *     watchdog_dev_init: init dev part of watchdog core
+/**
+ * watchdog_dev_init - init dev part of watchdog core
  *
- *     Allocate a range of chardev nodes to use for watchdog devices
+ * Allocate a range of chardev nodes to use for watchdog devices.
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 int __init watchdog_dev_init(void)
 {
        int err;
@@ -1218,12 +1225,11 @@ err_register:
        return err;
 }
 
-/*
- *     watchdog_dev_exit: exit dev part of watchdog core
+/**
+ * watchdog_dev_exit - exit dev part of watchdog core
  *
- *     Release the range of chardev nodes used for watchdog devices
+ * Release the range of chardev nodes used for watchdog devices.
  */
-
 void __exit watchdog_dev_exit(void)
 {
        unregister_chrdev_region(watchdog_devt, MAX_DOGS);
index 55e108e..1c8dc69 100644 (file)
@@ -49,22 +49,20 @@ int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
 
 void v9fs_cache_inode_get_cookie(struct inode *inode)
 {
-       struct v9fs_inode *v9inode;
+       struct v9fs_inode *v9inode = V9FS_I(inode);
        struct v9fs_session_info *v9ses;
        __le32 version;
        __le64 path;
 
        if (!S_ISREG(inode->i_mode))
                return;
-
-       v9inode = V9FS_I(inode);
-       if (WARN_ON(v9inode->fscache))
+       if (WARN_ON(v9fs_inode_cookie(v9inode)))
                return;
 
        version = cpu_to_le32(v9inode->qid.version);
        path = cpu_to_le64(v9inode->qid.path);
        v9ses = v9fs_inode2v9ses(inode);
-       v9inode->fscache =
+       v9inode->netfs_ctx.cache =
                fscache_acquire_cookie(v9fs_session_cache(v9ses),
                                       0,
                                       &path, sizeof(path),
@@ -72,5 +70,5 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
                                       i_size_read(&v9inode->vfs_inode));
 
        p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
-                inode, v9inode->fscache);
+                inode, v9fs_inode_cookie(v9inode));
 }
index 08f65c4..e28ddf7 100644 (file)
@@ -623,9 +623,7 @@ static void v9fs_sysfs_cleanup(void)
 static void v9fs_inode_init_once(void *foo)
 {
        struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
-#ifdef CONFIG_9P_FSCACHE
-       v9inode->fscache = NULL;
-#endif
+
        memset(&v9inode->qid, 0, sizeof(v9inode->qid));
        inode_init_once(&v9inode->vfs_inode);
 }
index bc8b302..ec0e8df 100644 (file)
@@ -9,6 +9,7 @@
 #define FS_9P_V9FS_H
 
 #include <linux/backing-dev.h>
+#include <linux/netfs.h>
 
 /**
  * enum p9_session_flags - option flags for each 9P session
@@ -108,14 +109,15 @@ struct v9fs_session_info {
 #define V9FS_INO_INVALID_ATTR 0x01
 
 struct v9fs_inode {
-#ifdef CONFIG_9P_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        struct p9_qid qid;
        unsigned int cache_validity;
        struct p9_fid *writeback_fid;
        struct mutex v_mutex;
-       struct inode vfs_inode;
 };
 
 static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
@@ -126,7 +128,7 @@ static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
 static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode)
 {
 #ifdef CONFIG_9P_FSCACHE
-       return v9inode->fscache;
+       return netfs_i_cookie(&v9inode->vfs_inode);
 #else
        return NULL;
 #endif
@@ -163,6 +165,7 @@ extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
 extern const struct inode_operations v9fs_dir_inode_operations_dotl;
 extern const struct inode_operations v9fs_file_inode_operations_dotl;
 extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern const struct netfs_request_ops v9fs_req_ops;
 extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
                                              struct p9_fid *fid,
                                              struct super_block *sb, int new);
index 76956c9..5011281 100644 (file)
 #include "fid.h"
 
 /**
- * v9fs_req_issue_op - Issue a read from 9P
+ * v9fs_issue_read - Issue a read from 9P
  * @subreq: The read to make
  */
-static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct p9_fid *fid = rreq->netfs_priv;
        struct iov_iter to;
        loff_t pos = subreq->start + subreq->transferred;
@@ -52,20 +52,21 @@ static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
 }
 
 /**
- * v9fs_init_rreq - Initialise a read request
+ * v9fs_init_request - Initialise a read request
  * @rreq: The read request
  * @file: The file being read from
  */
-static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        struct p9_fid *fid = file->private_data;
 
        refcount_inc(&fid->count);
        rreq->netfs_priv = fid;
+       return 0;
 }
 
 /**
- * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
+ * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request
  * @mapping: unused mapping of request to cleanup
  * @priv: private data to cleanup, a fid, guaranted non-null.
  */
@@ -76,22 +77,11 @@ static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
        p9_client_clunk(fid);
 }
 
-/**
- * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
- * @inode: The inode to check
- */
-static bool v9fs_is_cache_enabled(struct inode *inode)
-{
-       struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));
-
-       return fscache_cookie_enabled(cookie) && cookie->cache_priv;
-}
-
 /**
  * v9fs_begin_cache_operation - Begin a cache operation for a read
  * @rreq: The read request
  */
-static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
+static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
 {
 #ifdef CONFIG_9P_FSCACHE
        struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
@@ -102,36 +92,13 @@ static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
 #endif
 }
 
-static const struct netfs_read_request_ops v9fs_req_ops = {
-       .init_rreq              = v9fs_init_rreq,
-       .is_cache_enabled       = v9fs_is_cache_enabled,
+const struct netfs_request_ops v9fs_req_ops = {
+       .init_request           = v9fs_init_request,
        .begin_cache_operation  = v9fs_begin_cache_operation,
-       .issue_op               = v9fs_req_issue_op,
+       .issue_read             = v9fs_issue_read,
        .cleanup                = v9fs_req_cleanup,
 };
 
-/**
- * v9fs_vfs_readpage - read an entire page in from 9P
- * @file: file being read
- * @page: structure to page
- *
- */
-static int v9fs_vfs_readpage(struct file *file, struct page *page)
-{
-       struct folio *folio = page_folio(page);
-
-       return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
-}
-
-/**
- * v9fs_vfs_readahead - read a set of pages from 9P
- * @ractl: The readahead parameters
- */
-static void v9fs_vfs_readahead(struct readahead_control *ractl)
-{
-       netfs_readahead(ractl, &v9fs_req_ops, NULL);
-}
-
 /**
  * v9fs_release_page - release the private state associated with a page
  * @page: The page to be released
@@ -308,8 +275,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
-                                  &v9fs_req_ops, NULL);
+       retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata);
        if (retval < 0)
                return retval;
 
@@ -370,8 +336,8 @@ static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
 #endif
 
 const struct address_space_operations v9fs_addr_operations = {
-       .readpage = v9fs_vfs_readpage,
-       .readahead = v9fs_vfs_readahead,
+       .readpage = netfs_readpage,
+       .readahead = netfs_readahead,
        .dirty_folio = v9fs_dirty_folio,
        .writepage = v9fs_vfs_writepage,
        .write_begin = v9fs_write_begin,
index 84c3cf7..55367ec 100644 (file)
@@ -231,9 +231,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
        v9inode = alloc_inode_sb(sb, v9fs_inode_cache, GFP_KERNEL);
        if (!v9inode)
                return NULL;
-#ifdef CONFIG_9P_FSCACHE
-       v9inode->fscache = NULL;
-#endif
        v9inode->writeback_fid = NULL;
        v9inode->cache_validity = 0;
        mutex_init(&v9inode->v_mutex);
@@ -250,6 +247,14 @@ void v9fs_free_inode(struct inode *inode)
        kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
 }
 
+/*
+ * Set parameters for the netfs library
+ */
+static void v9fs_set_netfs_context(struct inode *inode)
+{
+       netfs_i_context_init(inode, &v9fs_req_ops);
+}
+
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
                    struct inode *inode, umode_t mode, dev_t rdev)
 {
@@ -338,6 +343,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
                err = -EINVAL;
                goto error;
        }
+
+       v9fs_set_netfs_context(inode);
 error:
        return err;
 
index db832cc..f120bcb 100644 (file)
@@ -76,6 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        /* there shouldn't be an existing inode */
        BUG_ON(!(inode->i_state & I_NEW));
 
+       netfs_i_context_init(inode, NULL);
        inode->i_size           = 0;
        inode->i_mode           = S_IFDIR | S_IRUGO | S_IXUGO;
        if (root) {
index 0f9fdb2..26292a1 100644 (file)
 #include "internal.h"
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_readpage(struct file *file, struct page *page);
 static int afs_symlink_readpage(struct file *file, struct page *page);
 static void afs_invalidate_folio(struct folio *folio, size_t offset,
                               size_t length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
-static void afs_readahead(struct readahead_control *ractl);
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static void afs_vm_open(struct vm_area_struct *area);
 static void afs_vm_close(struct vm_area_struct *area);
@@ -52,8 +50,8 @@ const struct inode_operations afs_file_inode_operations = {
 };
 
 const struct address_space_operations afs_file_aops = {
-       .readpage       = afs_readpage,
-       .readahead      = afs_readahead,
+       .readpage       = netfs_readpage,
+       .readahead      = netfs_readahead,
        .dirty_folio    = afs_dirty_folio,
        .launder_folio  = afs_launder_folio,
        .releasepage    = afs_releasepage,
@@ -240,7 +238,7 @@ void afs_put_read(struct afs_read *req)
 static void afs_fetch_data_notify(struct afs_operation *op)
 {
        struct afs_read *req = op->fetch.req;
-       struct netfs_read_subrequest *subreq = req->subreq;
+       struct netfs_io_subrequest *subreq = req->subreq;
        int error = op->error;
 
        if (error == -ECONNABORTED)
@@ -310,7 +308,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
        return afs_do_sync_operation(op);
 }
 
-static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
 {
        struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
        struct afs_read *fsreq;
@@ -359,19 +357,13 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
        return ret;
 }
 
-static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        rreq->netfs_priv = key_get(afs_file_key(file));
+       return 0;
 }
 
-static bool afs_is_cache_enabled(struct inode *inode)
-{
-       struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode));
-
-       return fscache_cookie_enabled(cookie) && cookie->cache_priv;
-}
-
-static int afs_begin_cache_operation(struct netfs_read_request *rreq)
+static int afs_begin_cache_operation(struct netfs_io_request *rreq)
 {
 #ifdef CONFIG_AFS_FSCACHE
        struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
@@ -396,27 +388,14 @@ static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
        key_put(netfs_priv);
 }
 
-const struct netfs_read_request_ops afs_req_ops = {
-       .init_rreq              = afs_init_rreq,
-       .is_cache_enabled       = afs_is_cache_enabled,
+const struct netfs_request_ops afs_req_ops = {
+       .init_request           = afs_init_request,
        .begin_cache_operation  = afs_begin_cache_operation,
        .check_write_begin      = afs_check_write_begin,
-       .issue_op               = afs_req_issue_op,
+       .issue_read             = afs_issue_read,
        .cleanup                = afs_priv_cleanup,
 };
 
-static int afs_readpage(struct file *file, struct page *page)
-{
-       struct folio *folio = page_folio(page);
-
-       return netfs_readpage(file, folio, &afs_req_ops, NULL);
-}
-
-static void afs_readahead(struct readahead_control *ractl)
-{
-       netfs_readahead(ractl, &afs_req_ops, NULL);
-}
-
 int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
index 5964f8a..2fe4024 100644 (file)
@@ -53,6 +53,14 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
                dump_stack();
 }
 
+/*
+ * Set parameters for the netfs library
+ */
+static void afs_set_netfs_context(struct afs_vnode *vnode)
+{
+       netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops);
+}
+
 /*
  * Initialise an inode from the vnode status.
  */
@@ -128,6 +136,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
        }
 
        afs_set_i_size(vnode, status->size);
+       afs_set_netfs_context(vnode);
 
        vnode->invalid_before   = status->data_version;
        inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
@@ -237,6 +246,7 @@ static void afs_apply_status(struct afs_operation *op,
                 * idea of what the size should be that's not the same as
                 * what's on the server.
                 */
+               vnode->netfs_ctx.remote_i_size = status->size;
                if (change_size) {
                        afs_set_i_size(vnode, status->size);
                        inode->i_ctime = t;
@@ -420,7 +430,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
        struct afs_vnode_cache_aux aux;
 
        if (vnode->status.type != AFS_FTYPE_FILE) {
-               vnode->cache = NULL;
+               vnode->netfs_ctx.cache = NULL;
                return;
        }
 
@@ -430,12 +440,14 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
        key.vnode_id_ext[1]     = htonl(vnode->fid.vnode_hi);
        afs_set_cache_aux(vnode, &aux);
 
-       vnode->cache = fscache_acquire_cookie(
-               vnode->volume->cache,
-               vnode->status.type == AFS_FTYPE_FILE ? 0 : FSCACHE_ADV_SINGLE_CHUNK,
-               &key, sizeof(key),
-               &aux, sizeof(aux),
-               vnode->status.size);
+       afs_vnode_set_cache(vnode,
+                           fscache_acquire_cookie(
+                                   vnode->volume->cache,
+                                   vnode->status.type == AFS_FTYPE_FILE ?
+                                   0 : FSCACHE_ADV_SINGLE_CHUNK,
+                                   &key, sizeof(key),
+                                   &aux, sizeof(aux),
+                                   vnode->status.size));
 #endif
 }
 
@@ -528,6 +540,7 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key)
 
        vnode = AFS_FS_I(inode);
        vnode->cb_v_break = as->volume->cb_v_break,
+       afs_set_netfs_context(vnode);
 
        op = afs_alloc_operation(key, as->volume);
        if (IS_ERR(op)) {
@@ -786,11 +799,8 @@ void afs_evict_inode(struct inode *inode)
                afs_put_wb_key(wbk);
        }
 
-#ifdef CONFIG_AFS_FSCACHE
-       fscache_relinquish_cookie(vnode->cache,
+       fscache_relinquish_cookie(afs_vnode_cache(vnode),
                                  test_bit(AFS_VNODE_DELETED, &vnode->flags));
-       vnode->cache = NULL;
-#endif
 
        afs_prune_wb_keys(vnode);
        afs_put_permits(rcu_access_pointer(vnode->permit_cache));
index dc5032e..7b7ef94 100644 (file)
@@ -207,7 +207,7 @@ struct afs_read {
        loff_t                  file_size;      /* File size returned by server */
        struct key              *key;           /* The key to use to reissue the read */
        struct afs_vnode        *vnode;         /* The file being read into. */
-       struct netfs_read_subrequest *subreq;   /* Fscache helper read request this belongs to */
+       struct netfs_io_subrequest *subreq;     /* Fscache helper read request this belongs to */
        afs_dataversion_t       data_version;   /* Version number returned by server */
        refcount_t              usage;
        unsigned int            call_debug_id;
@@ -619,15 +619,16 @@ enum afs_lock_state {
  * leak from one inode to another.
  */
 struct afs_vnode {
-       struct inode            vfs_inode;      /* the VFS's inode record */
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
 
        struct afs_volume       *volume;        /* volume on which vnode resides */
        struct afs_fid          fid;            /* the file identifier for this inode */
        struct afs_file_status  status;         /* AFS status info for this file */
        afs_dataversion_t       invalid_before; /* Child dentries are invalid before this */
-#ifdef CONFIG_AFS_FSCACHE
-       struct fscache_cookie   *cache;         /* caching cookie */
-#endif
        struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
        struct mutex            io_lock;        /* Lock for serialising I/O on this mutex */
        struct rw_semaphore     validate_lock;  /* lock for validating this vnode */
@@ -674,12 +675,20 @@ struct afs_vnode {
 static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
 {
 #ifdef CONFIG_AFS_FSCACHE
-       return vnode->cache;
+       return netfs_i_cookie(&vnode->vfs_inode);
 #else
        return NULL;
 #endif
 }
 
+static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
+                                      struct fscache_cookie *cookie)
+{
+#ifdef CONFIG_AFS_FSCACHE
+       vnode->netfs_ctx.cache = cookie;
+#endif
+}
+
 /*
  * cached security record for one user's attempt to access a vnode
  */
@@ -1063,7 +1072,7 @@ extern const struct address_space_operations afs_file_aops;
 extern const struct address_space_operations afs_symlink_aops;
 extern const struct inode_operations afs_file_inode_operations;
 extern const struct file_operations afs_file_operations;
-extern const struct netfs_read_request_ops afs_req_ops;
+extern const struct netfs_request_ops afs_req_ops;
 
 extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *);
 extern void afs_put_wb_key(struct afs_wb_key *);
index 7592c0f..1fea195 100644 (file)
@@ -688,13 +688,11 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        /* Reset anything that shouldn't leak from one inode to the next. */
        memset(&vnode->fid, 0, sizeof(vnode->fid));
        memset(&vnode->status, 0, sizeof(vnode->status));
+       afs_vnode_set_cache(vnode, NULL);
 
        vnode->volume           = NULL;
        vnode->lock_key         = NULL;
        vnode->permit_cache     = NULL;
-#ifdef CONFIG_AFS_FSCACHE
-       vnode->cache            = NULL;
-#endif
 
        vnode->flags            = 1 << AFS_VNODE_UNSET;
        vnode->lock_state       = AFS_VNODE_LOCK_NONE;
index e1c1708..6bcf147 100644 (file)
@@ -60,8 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
-                               &afs_req_ops, NULL);
+       ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata);
        if (ret < 0)
                return ret;
 
@@ -355,9 +354,10 @@ static const struct afs_operation_ops afs_store_data_operation = {
 static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
                          bool laundering)
 {
+       struct netfs_i_context *ictx = &vnode->netfs_ctx;
        struct afs_operation *op;
        struct afs_wb_key *wbk = NULL;
-       loff_t size = iov_iter_count(iter), i_size;
+       loff_t size = iov_iter_count(iter);
        int ret = -ENOKEY;
 
        _enter("%s{%llx:%llu.%u},%llx,%llx",
@@ -379,15 +379,13 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
                return -ENOMEM;
        }
 
-       i_size = i_size_read(&vnode->vfs_inode);
-
        afs_op_set_vnode(op, 0, vnode);
        op->file[0].dv_delta = 1;
        op->file[0].modification = true;
        op->store.write_iter = iter;
        op->store.pos = pos;
        op->store.size = size;
-       op->store.i_size = max(pos + size, i_size);
+       op->store.i_size = max(pos + size, ictx->remote_i_size);
        op->store.laundering = laundering;
        op->mtime = vnode->vfs_inode.i_mtime;
        op->flags |= AFS_OPERATION_UNINTR;
index bc7c7a7..9dc81e7 100644 (file)
@@ -380,18 +380,18 @@ presubmission_error:
  * Prepare a read operation, shortening it to a cached/uncached
  * boundary as appropriate.
  */
-static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subrequest *subreq,
+static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
                                                      loff_t i_size)
 {
        enum cachefiles_prepare_read_trace why;
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct fscache_cookie *cookie = fscache_cres_cookie(cres);
        const struct cred *saved_cred;
        struct file *file = cachefiles_cres_file(cres);
-       enum netfs_read_source ret = NETFS_DOWNLOAD_FROM_SERVER;
+       enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
        loff_t off, to;
        ino_t ino = file ? file_inode(file)->i_ino : 0;
 
@@ -404,7 +404,7 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
        }
 
        if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
-               __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+               __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
                why = cachefiles_trace_read_no_data;
                goto out_no_object;
        }
@@ -473,7 +473,7 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
        goto out;
 
 download_and_store:
-       __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+       __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 out:
        cachefiles_end_secure(cache, saved_cred);
 out_no_object:
index c7a0ab0..aa25bff 100644 (file)
@@ -182,7 +182,7 @@ static int ceph_releasepage(struct page *page, gfp_t gfp)
        return 1;
 }
 
-static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
+static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
 {
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
@@ -199,7 +199,7 @@ static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
        rreq->len = roundup(rreq->len, lo->stripe_unit);
 }
 
-static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
 {
        struct inode *inode = subreq->rreq->inode;
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -218,7 +218,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 {
        struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
-       struct netfs_read_subrequest *subreq = req->r_priv;
+       struct netfs_io_subrequest *subreq = req->r_priv;
        int num_pages;
        int err = req->r_result;
 
@@ -244,9 +244,9 @@ static void finish_netfs_read(struct ceph_osd_request *req)
        iput(req->r_inode);
 }
 
-static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_mds_reply_info_parsed *rinfo;
        struct ceph_mds_reply_info_in *iinfo;
@@ -258,7 +258,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
        size_t len;
 
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-       __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+       __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 
        if (subreq->start >= inode->i_size)
                goto out;
@@ -297,9 +297,9 @@ out:
        return true;
 }
 
-static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
+static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -353,6 +353,45 @@ out:
        dout("%s: result %d\n", __func__, err);
 }
 
+static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+       struct inode *inode = rreq->inode;
+       int got = 0, want = CEPH_CAP_FILE_CACHE;
+       int ret = 0;
+
+       if (rreq->origin != NETFS_READAHEAD)
+               return 0;
+
+       if (file) {
+               struct ceph_rw_context *rw_ctx;
+               struct ceph_file_info *fi = file->private_data;
+
+               rw_ctx = ceph_find_rw_context(fi);
+               if (rw_ctx)
+                       return 0;
+       }
+
+       /*
+        * readahead callers do not necessarily hold Fcb caps
+        * (e.g. fadvise, madvise).
+        */
+       ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
+       if (ret < 0) {
+               dout("start_read %p, error getting cap\n", inode);
+               return ret;
+       }
+
+       if (!(got & want)) {
+               dout("start_read %p, no cache cap\n", inode);
+               return -EACCES;
+       }
+       if (ret == 0)
+               return -EACCES;
+
+       rreq->netfs_priv = (void *)(uintptr_t)got;
+       return 0;
+}
+
 static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
 {
        struct inode *inode = mapping->host;
@@ -363,64 +402,16 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
                ceph_put_cap_refs(ci, got);
 }
 
-static const struct netfs_read_request_ops ceph_netfs_read_ops = {
-       .is_cache_enabled       = ceph_is_cache_enabled,
+const struct netfs_request_ops ceph_netfs_ops = {
+       .init_request           = ceph_init_request,
        .begin_cache_operation  = ceph_begin_cache_operation,
-       .issue_op               = ceph_netfs_issue_op,
+       .issue_read             = ceph_netfs_issue_read,
        .expand_readahead       = ceph_netfs_expand_readahead,
        .clamp_length           = ceph_netfs_clamp_length,
        .check_write_begin      = ceph_netfs_check_write_begin,
        .cleanup                = ceph_readahead_cleanup,
 };
 
-/* read a single page, without unlocking it. */
-static int ceph_readpage(struct file *file, struct page *subpage)
-{
-       struct folio *folio = page_folio(subpage);
-       struct inode *inode = file_inode(file);
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_vino vino = ceph_vino(inode);
-       size_t len = folio_size(folio);
-       u64 off = folio_file_pos(folio);
-
-       dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n inline %d",
-            vino.ino, vino.snap, file, off, len, folio, folio_index(folio),
-            ci->i_inline_version != CEPH_INLINE_NONE);
-
-       return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
-}
-
-static void ceph_readahead(struct readahead_control *ractl)
-{
-       struct inode *inode = file_inode(ractl->file);
-       struct ceph_file_info *fi = ractl->file->private_data;
-       struct ceph_rw_context *rw_ctx;
-       int got = 0;
-       int ret = 0;
-
-       if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
-               return;
-
-       rw_ctx = ceph_find_rw_context(fi);
-       if (!rw_ctx) {
-               /*
-                * readahead callers do not necessarily hold Fcb caps
-                * (e.g. fadvise, madvise).
-                */
-               int want = CEPH_CAP_FILE_CACHE;
-
-               ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
-               if (ret < 0)
-                       dout("start_read %p, error getting cap\n", inode);
-               else if (!(got & want))
-                       dout("start_read %p, no cache cap\n", inode);
-
-               if (ret <= 0)
-                       return;
-       }
-       netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
-}
-
 #ifdef CONFIG_CEPH_FSCACHE
 static void ceph_set_page_fscache(struct page *page)
 {
@@ -1327,8 +1318,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
        struct folio *folio = NULL;
        int r;
 
-       r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
-                             &ceph_netfs_read_ops, NULL);
+       r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL);
        if (r == 0)
                folio_wait_fscache(folio);
        if (r < 0) {
@@ -1382,8 +1372,8 @@ out:
 }
 
 const struct address_space_operations ceph_aops = {
-       .readpage = ceph_readpage,
-       .readahead = ceph_readahead,
+       .readpage = netfs_readpage,
+       .readahead = netfs_readahead,
        .writepage = ceph_writepage,
        .writepages = ceph_writepages_start,
        .write_begin = ceph_write_begin,
index 7d22850..ddea999 100644 (file)
@@ -29,26 +29,25 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
        if (!(inode->i_state & I_NEW))
                return;
 
-       WARN_ON_ONCE(ci->fscache);
+       WARN_ON_ONCE(ci->netfs_ctx.cache);
 
-       ci->fscache = fscache_acquire_cookie(fsc->fscache, 0,
-                                            &ci->i_vino, sizeof(ci->i_vino),
-                                            &ci->i_version, sizeof(ci->i_version),
-                                            i_size_read(inode));
+       ci->netfs_ctx.cache =
+               fscache_acquire_cookie(fsc->fscache, 0,
+                                      &ci->i_vino, sizeof(ci->i_vino),
+                                      &ci->i_version, sizeof(ci->i_version),
+                                      i_size_read(inode));
 }
 
-void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
 {
-       struct fscache_cookie *cookie = ci->fscache;
-
-       fscache_relinquish_cookie(cookie, false);
+       fscache_relinquish_cookie(ceph_fscache_cookie(ci), false);
 }
 
 void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       fscache_use_cookie(ci->fscache, will_modify);
+       fscache_use_cookie(ceph_fscache_cookie(ci), will_modify);
 }
 
 void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
@@ -58,9 +57,10 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
        if (update) {
                loff_t i_size = i_size_read(inode);
 
-               fscache_unuse_cookie(ci->fscache, &ci->i_version, &i_size);
+               fscache_unuse_cookie(ceph_fscache_cookie(ci),
+                                    &ci->i_version, &i_size);
        } else {
-               fscache_unuse_cookie(ci->fscache, NULL, NULL);
+               fscache_unuse_cookie(ceph_fscache_cookie(ci), NULL, NULL);
        }
 }
 
@@ -69,14 +69,14 @@ void ceph_fscache_update(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        loff_t i_size = i_size_read(inode);
 
-       fscache_update_cookie(ci->fscache, &ci->i_version, &i_size);
+       fscache_update_cookie(ceph_fscache_cookie(ci), &ci->i_version, &i_size);
 }
 
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       fscache_invalidate(ceph_inode(inode)->fscache,
+       fscache_invalidate(ceph_fscache_cookie(ci),
                           &ci->i_version, i_size_read(inode),
                           dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
 }
index b90f301..7255b79 100644 (file)
@@ -26,14 +26,9 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update);
 void ceph_fscache_update(struct inode *inode);
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-       ci->fscache = NULL;
-}
-
 static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
 {
-       return ci->fscache;
+       return netfs_i_cookie(&ci->vfs_inode);
 }
 
 static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
@@ -62,7 +57,7 @@ static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
        return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
        struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
 
@@ -91,10 +86,6 @@ static inline void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
 {
 }
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-}
-
 static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
 }
@@ -144,7 +135,7 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
        return false;
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
        return -ENOBUFS;
 }
index d80911d..63113e2 100644 (file)
@@ -459,6 +459,9 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        dout("alloc_inode %p\n", &ci->vfs_inode);
 
+       /* Set parameters for the netfs library */
+       netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);
+
        spin_lock_init(&ci->i_ceph_lock);
 
        ci->i_version = 0;
@@ -544,9 +547,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        INIT_WORK(&ci->i_work, ceph_inode_work);
        ci->i_work_mask = 0;
        memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
-
-       ceph_fscache_inode_init(ci);
-
        return &ci->vfs_inode;
 }
 
index a1ecc41..20ceab7 100644 (file)
 #include <linux/posix_acl.h>
 #include <linux/refcount.h>
 #include <linux/security.h>
+#include <linux/netfs.h>
+#include <linux/fscache.h>
 
 #include <linux/ceph/libceph.h>
 
-#ifdef CONFIG_CEPH_FSCACHE
-#include <linux/fscache.h>
-#endif
-
 /* large granularity for statfs utilization stats to facilitate
  * large volume sizes on 32-bit machines. */
 #define CEPH_BLOCK_SHIFT   22  /* 4 MB */
@@ -318,6 +316,11 @@ struct ceph_inode_xattrs_info {
  * Ceph inode.
  */
 struct ceph_inode_info {
+       struct {
+               /* These must be contiguous */
+               struct inode vfs_inode;
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
        spinlock_t i_ceph_lock;
@@ -428,11 +431,6 @@ struct ceph_inode_info {
 
        struct work_struct i_work;
        unsigned long  i_work_mask;
-
-#ifdef CONFIG_CEPH_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
-       struct inode vfs_inode; /* at end */
 };
 
 static inline struct ceph_inode_info *
@@ -1216,6 +1214,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
 
 /* addr.c */
 extern const struct address_space_operations ceph_aops;
+extern const struct netfs_request_ops ceph_netfs_ops;
 extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
 extern int ceph_uninline_data(struct file *file);
 extern int ceph_pool_perm_check(struct inode *inode, int need);
index 48b343d..0a4085c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/utsname.h>
+#include <linux/netfs.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
 #include <crypto/internal/hash.h>
@@ -1402,6 +1403,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
  */
 
 struct cifsInodeInfo {
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        bool can_cache_brlcks;
        struct list_head llist; /* locks helb by this inode */
        /*
@@ -1432,10 +1438,6 @@ struct cifsInodeInfo {
        u64  uniqueid;                  /* server inode number */
        u64  createtime;                /* creation time on server */
        __u8 lease_key[SMB2_LEASE_KEY_SIZE];    /* lease key for this inode */
-#ifdef CONFIG_CIFS_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
-       struct inode vfs_inode;
        struct list_head deferred_closes; /* list of deferred closes */
        spinlock_t deferred_lock; /* protection on deferred list */
        bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
index 33af72e..a638b29 100644 (file)
@@ -103,7 +103,7 @@ void cifs_fscache_get_inode_cookie(struct inode *inode)
 
        cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
 
-       cifsi->fscache =
+       cifsi->netfs_ctx.cache =
                fscache_acquire_cookie(tcon->fscache, 0,
                                       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
                                       &cd, sizeof(cd),
@@ -126,22 +126,15 @@ void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
 void cifs_fscache_release_inode_cookie(struct inode *inode)
 {
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
+       struct fscache_cookie *cookie = cifs_inode_cookie(inode);
 
-       if (cifsi->fscache) {
-               cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
-               fscache_relinquish_cookie(cifsi->fscache, false);
-               cifsi->fscache = NULL;
+       if (cookie) {
+               cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie);
+               fscache_relinquish_cookie(cookie, false);
+               cifsi->netfs_ctx.cache = NULL;
        }
 }
 
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * Fallback page reading interface.
  */
index 5512990..52355c0 100644 (file)
@@ -61,7 +61,7 @@ void cifs_fscache_fill_coherency(struct inode *inode,
 
 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
 {
-       return CIFS_I(inode)->fscache;
+       return netfs_i_cookie(inode);
 }
 
 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
index f121c21..ed1c9ed 100644 (file)
@@ -70,17 +70,6 @@ static inline void fscache_see_cookie(struct fscache_cookie *cookie,
                             where);
 }
 
-/*
- * io.c
- */
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * main.c
  */
index d671084..39080b2 100644 (file)
@@ -606,9 +606,9 @@ out:
        return ret;
 }
 
-static inline __be64 *gfs2_indirect_init(struct metapath *mp,
-                                        struct gfs2_glock *gl, unsigned int i,
-                                        unsigned offset, u64 bn)
+static inline void gfs2_indirect_init(struct metapath *mp,
+                                     struct gfs2_glock *gl, unsigned int i,
+                                     unsigned offset, u64 bn)
 {
        __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
                       ((i > 1) ? sizeof(struct gfs2_meta_header) :
@@ -621,7 +621,6 @@ static inline __be64 *gfs2_indirect_init(struct metapath *mp,
        gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
        ptr += offset;
        *ptr = cpu_to_be64(bn);
-       return ptr;
 }
 
 enum alloc_state {
@@ -2146,7 +2145,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 
        ret = do_shrink(inode, newsize);
 out:
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_delete(ip);
        gfs2_qa_put(ip);
        return ret;
 }
index 8c39a85..22b41ac 100644 (file)
@@ -706,7 +706,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
 
        if (file->f_mode & FMODE_WRITE) {
                if (gfs2_rs_active(&ip->i_res))
-                       gfs2_rs_delete(ip, &inode->i_writecount);
+                       gfs2_rs_delete(ip);
                gfs2_qa_put(ip);
        }
        return 0;
@@ -775,8 +775,7 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
                                         size_t *window_size)
 {
        size_t count = iov_iter_count(i);
-       char __user *p;
-       int pages = 1;
+       size_t size, offs;
 
        if (likely(!count))
                return false;
@@ -785,18 +784,20 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
        if (!iter_is_iovec(i))
                return false;
 
+       size = PAGE_SIZE;
+       offs = offset_in_page(i->iov[0].iov_base + i->iov_offset);
        if (*prev_count != count || !*window_size) {
-               int pages, nr_dirtied;
+               size_t nr_dirtied;
 
-               pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
+               size = ALIGN(offs + count, PAGE_SIZE);
+               size = min_t(size_t, size, SZ_1M);
                nr_dirtied = max(current->nr_dirtied_pause -
-                                current->nr_dirtied, 1);
-               pages = min(pages, nr_dirtied);
+                                current->nr_dirtied, 8);
+               size = min(size, nr_dirtied << PAGE_SHIFT);
        }
 
        *prev_count = count;
-       p = i->iov[0].iov_base + i->iov_offset;
-       *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
+       *window_size = size - offs;
        return true;
 }
 
@@ -851,9 +852,9 @@ retry_under_glock:
                leftover = fault_in_iov_iter_writeable(to, window_size);
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(gh))
-                               goto retry;
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       goto retry;
                }
        }
        if (gfs2_holder_queued(gh))
@@ -920,9 +921,9 @@ retry_under_glock:
                leftover = fault_in_iov_iter_readable(from, window_size);
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(gh))
-                               goto retry;
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       goto retry;
                }
        }
 out:
@@ -950,20 +951,19 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
         * and retry.
         */
 
-       if (iocb->ki_flags & IOCB_DIRECT) {
-               ret = gfs2_file_direct_read(iocb, to, &gh);
-               if (likely(ret != -ENOTBLK))
-                       return ret;
-               iocb->ki_flags &= ~IOCB_DIRECT;
-       }
+       if (iocb->ki_flags & IOCB_DIRECT)
+               return gfs2_file_direct_read(iocb, to, &gh);
+
+       pagefault_disable();
        iocb->ki_flags |= IOCB_NOIO;
        ret = generic_file_read_iter(iocb, to);
        iocb->ki_flags &= ~IOCB_NOIO;
+       pagefault_enable();
        if (ret >= 0) {
                if (!iov_iter_count(to))
                        return ret;
                written = ret;
-       } else {
+       } else if (ret != -EFAULT) {
                if (ret != -EAGAIN)
                        return ret;
                if (iocb->ki_flags & IOCB_NOWAIT)
@@ -989,12 +989,11 @@ retry_under_glock:
                leftover = fault_in_iov_iter_writeable(to, window_size);
                gfs2_holder_disallow_demote(&gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(&gh)) {
-                               if (written)
-                                       goto out_uninit;
-                               goto retry;
-                       }
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(&gh))
+                               goto retry_under_glock;
+                       if (written)
+                               goto out_uninit;
+                       goto retry;
                }
        }
        if (gfs2_holder_queued(&gh))
@@ -1068,12 +1067,11 @@ retry_under_glock:
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
                        from->count = min(from->count, window_size - leftover);
-                       if (!gfs2_holder_queued(gh)) {
-                               if (read)
-                                       goto out_uninit;
-                               goto retry;
-                       }
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       if (read && !(iocb->ki_flags & IOCB_DIRECT))
+                               goto out_uninit;
+                       goto retry;
                }
        }
 out_unlock:
@@ -1083,6 +1081,7 @@ out_uninit:
        gfs2_holder_uninit(gh);
        if (statfs_gh)
                kfree(statfs_gh);
+       from->count = orig_count - read;
        return read ? read : ret;
 }
 
@@ -1497,7 +1496,6 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                if (error != GLR_TRYFAILED)
                        break;
                fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
-               fl_gh->gh_error = 0;
                msleep(sleeptime);
        }
        if (error) {
index 6b23399..630c655 100644 (file)
@@ -542,7 +542,7 @@ restart:
                         * some reason. If this holder is the head of the list, it
                         * means we have a blocked holder at the head, so return 1.
                         */
-                       if (gh->gh_list.prev == &gl->gl_holders)
+                       if (list_is_first(&gh->gh_list, &gl->gl_holders))
                                return 1;
                        do_error(gl, 0);
                        break;
@@ -669,6 +669,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 
        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
+               if (gh && (ret & LM_OUT_CANCELED))
+                       gfs2_holder_wake(gh);
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
@@ -1259,7 +1261,6 @@ void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
-       gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
 }
@@ -1565,6 +1566,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);
 
+       gh->gh_error = 0;
        spin_lock(&gl->gl_lockref.lock);
        add_to_queue(gh);
        if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1691,6 +1693,14 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
        struct gfs2_glock *gl = gh->gh_gl;
 
        spin_lock(&gl->gl_lockref.lock);
+       if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
+           !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+               spin_unlock(&gl->gl_lockref.lock);
+               gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
+               wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
+               spin_lock(&gl->gl_lockref.lock);
+       }
+
        __gfs2_glock_dq(gh);
        spin_unlock(&gl->gl_lockref.lock);
 }
index 89905f4..c8ec876 100644 (file)
@@ -131,7 +131,21 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                struct gfs2_sbd *sdp = GFS2_SB(inode);
                struct gfs2_glock *io_gl;
 
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
+                                      &ip->i_gl);
+               if (unlikely(error))
+                       goto fail;
+
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE,
+                                      &io_gl);
+               if (unlikely(error))
+                       goto fail;
+
+               if (blktype != GFS2_BLKST_UNLINKED)
+                       gfs2_cancel_delete_work(io_gl);
+               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT,
+                                          &ip->i_iopen_gh);
+               gfs2_glock_put(io_gl);
                if (unlikely(error))
                        goto fail;
 
@@ -161,16 +175,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
                set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
 
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-               if (unlikely(error))
-                       goto fail;
-               if (blktype != GFS2_BLKST_UNLINKED)
-                       gfs2_cancel_delete_work(io_gl);
-               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
-               gfs2_glock_put(io_gl);
-               if (unlikely(error))
-                       goto fail;
-
                /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
                inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
                inode->i_atime.tv_nsec = 0;
@@ -716,13 +720,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
        BUG_ON(error);
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
        if (error)
                goto fail_gunlock2;
 
+       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       if (error)
+               goto fail_gunlock3;
+
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
-               goto fail_gunlock2;
+               goto fail_gunlock3;
 
        if (blocks > 1) {
                ip->i_eattr = ip->i_no_addr + 1;
@@ -731,10 +739,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        init_dinode(dip, ip, symname);
        gfs2_trans_end(sdp);
 
-       error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
-       if (error)
-               goto fail_gunlock2;
-
        glock_set_object(ip->i_gl, ip);
        glock_set_object(io_gl, ip);
        gfs2_set_iop(inode);
@@ -745,14 +749,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (default_acl) {
                error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
                if (error)
-                       goto fail_gunlock3;
+                       goto fail_gunlock4;
                posix_acl_release(default_acl);
                default_acl = NULL;
        }
        if (acl) {
                error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
                if (error)
-                       goto fail_gunlock3;
+                       goto fail_gunlock4;
                posix_acl_release(acl);
                acl = NULL;
        }
@@ -760,11 +764,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
                                             &gfs2_initxattrs, NULL);
        if (error)
-               goto fail_gunlock3;
+               goto fail_gunlock4;
 
        error = link_dinode(dip, name, ip, &da);
        if (error)
-               goto fail_gunlock3;
+               goto fail_gunlock4;
 
        mark_inode_dirty(inode);
        d_instantiate(dentry, inode);
@@ -782,9 +786,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        unlock_new_inode(inode);
        return error;
 
-fail_gunlock3:
+fail_gunlock4:
        glock_clear_object(ip->i_gl, ip);
        glock_clear_object(io_gl, ip);
+fail_gunlock3:
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
        gfs2_glock_put(io_gl);
@@ -793,7 +798,7 @@ fail_free_inode:
                if (free_vfs_inode) /* else evict will do the put for us */
                        gfs2_glock_put(ip->i_gl);
        }
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_deltree(&ip->i_res);
        gfs2_qa_put(ip);
 fail_free_acls:
        posix_acl_release(default_acl);
index 50578f8..2559a79 100644 (file)
@@ -261,6 +261,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
        int req;
        u32 lkf;
        char strname[GDLM_STRNAME_BYTES] = "";
+       int error;
 
        req = make_mode(gl->gl_name.ln_sbd, req_state);
        lkf = make_flags(gl, flags, req);
@@ -279,8 +280,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
         * Submit the actual lock request.
         */
 
-       return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+again:
+       error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
                        GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+       if (error == -EBUSY) {
+               msleep(20);
+               goto again;
+       }
+       return error;
 }
 
 static void gdlm_put_lock(struct gfs2_glock *gl)
@@ -312,8 +319,14 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
                return;
        }
 
+again:
        error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
                           NULL, gl);
+       if (error == -EBUSY) {
+               msleep(20);
+               goto again;
+       }
+
        if (error) {
                fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
                       gl->gl_name.ln_type,
index 0fb3c01..801ad9f 100644 (file)
@@ -680,13 +680,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 /**
  * gfs2_rs_delete - delete a multi-block reservation
  * @ip: The inode for this reservation
- * @wcount: The inode's write count, or NULL
  *
  */
-void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
+void gfs2_rs_delete(struct gfs2_inode *ip)
 {
+       struct inode *inode = &ip->i_inode;
+
        down_write(&ip->i_rw_mutex);
-       if ((wcount == NULL) || (atomic_read(wcount) <= 1))
+       if (atomic_read(&inode->i_writecount) <= 1)
                gfs2_rs_deltree(&ip->i_res);
        up_write(&ip->i_rw_mutex);
 }
@@ -922,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        spin_lock_init(&rgd->rd_rsspin);
        mutex_init(&rgd->rd_mutex);
 
-       error = compute_bitstructs(rgd);
-       if (error)
-               goto fail;
-
        error = gfs2_glock_get(sdp, rgd->rd_addr,
                               &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
        if (error)
                goto fail;
 
+       error = compute_bitstructs(rgd);
+       if (error)
+               goto fail_glock;
+
        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
        rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
        if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -944,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        }
 
        error = 0; /* someone else read in the rgrp; free it and ignore it */
+fail_glock:
        gfs2_glock_put(rgd->rd_gl);
 
 fail:
@@ -1415,7 +1417,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 
        start = r.start >> bs_shift;
        end = start + (r.len >> bs_shift);
-       minlen = max_t(u64, r.minlen,
+       minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
+       minlen = max_t(u64, minlen,
                       q->limits.discard_granularity) >> bs_shift;
 
        if (end <= start || minlen > sdp->sd_max_rg_data)
index 3e2ca1f..46dd94e 100644 (file)
@@ -45,7 +45,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
                             bool dinode, u64 *generation);
 
 extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
+extern void gfs2_rs_delete(struct gfs2_inode *ip);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
                               u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
index cf9cf66..bdb773e 100644 (file)
@@ -1396,7 +1396,7 @@ out:
        truncate_inode_pages_final(&inode->i_data);
        if (ip->i_qadata)
                gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_deltree(&ip->i_res);
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
index b288c8a..837cd55 100644 (file)
@@ -415,13 +415,15 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
                jffs2_free_ino_caches(c);
                jffs2_free_raw_node_refs(c);
                ret = -EIO;
-               goto out_free;
+               goto out_sum_exit;
        }
 
        jffs2_calc_trigger_levels(c);
 
        return 0;
 
+ out_sum_exit:
+       jffs2_sum_exit(c);
  out_free:
        kvfree(c->blocks);
 
index 2ac4104..71f03a5 100644 (file)
@@ -603,8 +603,8 @@ out_root:
        jffs2_free_ino_caches(c);
        jffs2_free_raw_node_refs(c);
        kvfree(c->blocks);
- out_inohash:
        jffs2_clear_xattr_subsystem(c);
+ out_inohash:
        kfree(c->inocache_list);
  out_wbuf:
        jffs2_flash_cleanup(c);
index 2e4a867..93a2951 100644 (file)
 #include <linux/mutex.h>
 
 struct jffs2_inode_info {
-       /* We need an internal mutex similar to inode->i_mutex.
+       /* We need an internal mutex similar to inode->i_rwsem.
           Unfortunately, we can't used the existing one, because
           either the GC would deadlock, or we'd have to release it
           before letting GC proceed. Or we'd have to put ugliness
-          into the GC code so it didn't attempt to obtain the i_mutex
+          into the GC code so it didn't attempt to obtain the i_rwsem
           for the inode(s) which are already locked */
        struct mutex sem;
 
index b676056..29671e3 100644 (file)
@@ -136,7 +136,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
                if (!s) {
                        JFFS2_WARNING("Can't allocate memory for summary\n");
                        ret = -ENOMEM;
-                       goto out;
+                       goto out_buf;
                }
        }
 
@@ -275,13 +275,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
        }
        ret = 0;
  out:
+       jffs2_sum_reset_collected(s);
+       kfree(s);
+ out_buf:
        if (buf_size)
                kfree(flashbuf);
 #ifndef __ECOS
        else
                mtd_unpoint(c->mtd, 0, c->mtd->size);
 #endif
-       kfree(s);
        return ret;
 }
 
index c15bfc9..f684c0c 100644 (file)
@@ -1,5 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 
-netfs-y := read_helper.o stats.o
+netfs-y := \
+       buffered_read.o \
+       io.o \
+       main.o \
+       objects.o
+
+netfs-$(CONFIG_NETFS_STATS) += stats.o
 
 obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
new file mode 100644 (file)
index 0000000..281a88a
--- /dev/null
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level buffered read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Unlock the folios in a read operation.  We need to set PG_fscache on any
+ * folios we're going to write back before we unlock them.
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+       struct folio *folio;
+       unsigned int iopos, account = 0;
+       pgoff_t start_page = rreq->start / PAGE_SIZE;
+       pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+       bool subreq_failed = false;
+
+       XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+       if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
+               __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+               list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+                       __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
+               }
+       }
+
+       /* Walk through the pagecache and the I/O request lists simultaneously.
+        * We may have a mixture of cached and uncached sections and we only
+        * really want to write out the uncached sections.  This is slightly
+        * complicated by the possibility that we might have huge pages with a
+        * mixture inside.
+        */
+       subreq = list_first_entry(&rreq->subrequests,
+                                 struct netfs_io_subrequest, rreq_link);
+       iopos = 0;
+       subreq_failed = (subreq->error < 0);
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
+
+       rcu_read_lock();
+       xas_for_each(&xas, folio, last_page) {
+               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+               unsigned int pgend = pgpos + folio_size(folio);
+               bool pg_failed = false;
+
+               for (;;) {
+                       if (!subreq) {
+                               pg_failed = true;
+                               break;
+                       }
+                       if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+                               folio_start_fscache(folio);
+                       pg_failed |= subreq_failed;
+                       if (pgend < iopos + subreq->len)
+                               break;
+
+                       account += subreq->transferred;
+                       iopos += subreq->len;
+                       if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+                               subreq = list_next_entry(subreq, rreq_link);
+                               subreq_failed = (subreq->error < 0);
+                       } else {
+                               subreq = NULL;
+                               subreq_failed = false;
+                       }
+                       if (pgend == iopos)
+                               break;
+               }
+
+               if (!pg_failed) {
+                       flush_dcache_folio(folio);
+                       folio_mark_uptodate(folio);
+               }
+
+               if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+                       if (folio_index(folio) == rreq->no_unlock_folio &&
+                           test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
+                               _debug("no unlock");
+                       else
+                               folio_unlock(folio);
+               }
+       }
+       rcu_read_unlock();
+
+       task_io_account_read(account);
+       if (rreq->netfs_ops->done)
+               rreq->netfs_ops->done(rreq);
+}
+
+static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
+                                        loff_t *_start, size_t *_len, loff_t i_size)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+       if (cres->ops && cres->ops->expand_readahead)
+               cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
+
+static void netfs_rreq_expand(struct netfs_io_request *rreq,
+                             struct readahead_control *ractl)
+{
+       /* Give the cache a chance to change the request parameters.  The
+        * resultant request must contain the original region.
+        */
+       netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+
+       /* Give the netfs a chance to change the request parameters.  The
+        * resultant request must contain the original region.
+        */
+       if (rreq->netfs_ops->expand_readahead)
+               rreq->netfs_ops->expand_readahead(rreq);
+
+       /* Expand the request if the cache wants it to start earlier.  Note
+        * that the expansion may get further extended if the VM wishes to
+        * insert THPs and the preferred start and/or end wind up in the middle
+        * of THPs.
+        *
+        * If this is the case, however, the THP size should be an integer
+        * multiple of the cache granule size, so we get a whole number of
+        * granules to deal with.
+        */
+       if (rreq->start  != readahead_pos(ractl) ||
+           rreq->len != readahead_length(ractl)) {
+               readahead_expand(ractl, rreq->start, rreq->len);
+               rreq->start  = readahead_pos(ractl);
+               rreq->len = readahead_length(ractl);
+
+               trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+                                netfs_read_trace_expanded);
+       }
+}
+
+/**
+ * netfs_readahead - Helper to manage a read request
+ * @ractl: The description of the readahead request
+ *
+ * Fulfil a readahead request by drawing data from the cache if possible, or
+ * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
+ * requests from different sources will get munged together.  If necessary, the
+ * readahead window can be expanded in either direction to a more convenient
+ * alignment for RPC efficiency or to make storage in the cache feasible.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+void netfs_readahead(struct readahead_control *ractl)
+{
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
+       int ret;
+
+       _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+
+       if (readahead_count(ractl) == 0)
+               return;
+
+       rreq = netfs_alloc_request(ractl->mapping, ractl->file,
+                                  readahead_pos(ractl),
+                                  readahead_length(ractl),
+                                  NETFS_READAHEAD);
+       if (IS_ERR(rreq))
+               return;
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto cleanup_free;
+       }
+
+       netfs_stat(&netfs_n_rh_readahead);
+       trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+                        netfs_read_trace_readahead);
+
+       netfs_rreq_expand(rreq, ractl);
+
+       /* Drop the refs on the folios here rather than in the cache or
+        * filesystem.  The locks will be dropped in netfs_rreq_unlock().
+        */
+       while (readahead_folio(ractl))
+               ;
+
+       netfs_begin_read(rreq, false);
+       return;
+
+cleanup_free:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+       return;
+}
+EXPORT_SYMBOL(netfs_readahead);
+
+/**
+ * netfs_readpage - Helper to manage a readpage request
+ * @file: The file to read from
+ * @subpage: A subpage of the folio to read
+ *
+ * Fulfil a readpage request by drawing data from the cache if possible, or the
+ * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
+ * from different sources will get munged together.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_readpage(struct file *file, struct page *subpage)
+{
+       struct folio *folio = page_folio(subpage);
+       struct address_space *mapping = folio_file_mapping(folio);
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(mapping->host);
+       int ret;
+
+       _enter("%lx", folio_index(folio));
+
+       rreq = netfs_alloc_request(mapping, file,
+                                  folio_file_pos(folio), folio_size(folio),
+                                  NETFS_READPAGE);
+       if (IS_ERR(rreq)) {
+               ret = PTR_ERR(rreq);
+               goto alloc_error;
+       }
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto discard;
+       }
+
+       netfs_stat(&netfs_n_rh_readpage);
+       trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+       return netfs_begin_read(rreq, true);
+
+discard:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+alloc_error:
+       folio_unlock(folio);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_readpage);
+
+/*
+ * Prepare a folio for writing without reading first
+ * @folio: The folio being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ * @always_fill: T if the folio should always be completely filled/cleared
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full folio write
+ * - write that lies in a folio that is completely beyond EOF
+ * - write that covers the folio from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the folio and return true. Otherwise, return false.
+ */
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
+                                bool always_fill)
+{
+       struct inode *inode = folio_inode(folio);
+       loff_t i_size = i_size_read(inode);
+       size_t offset = offset_in_folio(folio, pos);
+       size_t plen = folio_size(folio);
+
+       if (unlikely(always_fill)) {
+               if (pos - offset + len <= i_size)
+                       return false; /* Page entirely before EOF */
+               zero_user_segment(&folio->page, 0, plen);
+               folio_mark_uptodate(folio);
+               return true;
+       }
+
+       /* Full folio write */
+       if (offset == 0 && len >= plen)
+               return true;
+
+       /* Page entirely beyond the end of the file */
+       if (pos - offset >= i_size)
+               goto zero_out;
+
+       /* Write that covers from the start of the folio to EOF or beyond */
+       if (offset == 0 && (pos + len) >= i_size)
+               goto zero_out;
+
+       return false;
+zero_out:
+       zero_user_segments(&folio->page, 0, offset, offset + len, plen);
+       return true;
+}
+
+/**
+ * netfs_write_begin - Helper to prepare for writing
+ * @file: The file to read from
+ * @mapping: The mapping to read from
+ * @pos: File position at which the write will begin
+ * @len: The length of the write (may extend beyond the end of the folio chosen)
+ * @aop_flags: AOP_* flags
+ * @_folio: Where to put the resultant folio
+ * @_fsdata: Place for the netfs to store a cookie
+ *
+ * Pre-read data for a write-begin request by drawing data from the cache if
+ * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.  If
+ * necessary, the readahead window can be expanded in either direction to a
+ * more convenient alignment for RPC efficiency or to make storage in the cache
+ * feasible.
+ *
+ * The calling netfs must provide a table of operations, only one of which,
+ * issue_op, is mandatory.
+ *
+ * The check_write_begin() operation can be provided to check for and flush
+ * conflicting writes once the folio is grabbed and locked.  It is passed a
+ * pointer to the fsdata cookie that gets returned to the VM to be passed to
+ * write_end.  It is permitted to sleep.  It should return 0 if the request
+ * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
+ * be regot; or return an error.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_write_begin(struct file *file, struct address_space *mapping,
+                     loff_t pos, unsigned int len, unsigned int aop_flags,
+                     struct folio **_folio, void **_fsdata)
+{
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
+       struct folio *folio;
+       unsigned int fgp_flags;
+       pgoff_t index = pos >> PAGE_SHIFT;
+       int ret;
+
+       DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
+
+retry:
+       fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+       if (aop_flags & AOP_FLAG_NOFS)
+               fgp_flags |= FGP_NOFS;
+       folio = __filemap_get_folio(mapping, index, fgp_flags,
+                                   mapping_gfp_mask(mapping));
+       if (!folio)
+               return -ENOMEM;
+
+       if (ctx->ops->check_write_begin) {
+               /* Allow the netfs (eg. ceph) to flush conflicts. */
+               ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
+               if (ret < 0) {
+                       trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
+                       if (ret == -EAGAIN)
+                               goto retry;
+                       goto error;
+               }
+       }
+
+       if (folio_test_uptodate(folio))
+               goto have_folio;
+
+       /* If the page is beyond the EOF, we want to clear it - unless it's
+        * within the cache granule containing the EOF, in which case we need
+        * to preload the granule.
+        */
+       if (!netfs_is_cache_enabled(ctx) &&
+           netfs_skip_folio_read(folio, pos, len, false)) {
+               netfs_stat(&netfs_n_rh_write_zskip);
+               goto have_folio_no_wait;
+       }
+
+       rreq = netfs_alloc_request(mapping, file,
+                                  folio_file_pos(folio), folio_size(folio),
+                                  NETFS_READ_FOR_WRITE);
+       if (IS_ERR(rreq)) {
+               ret = PTR_ERR(rreq);
+               goto error;
+       }
+       rreq->no_unlock_folio   = folio_index(folio);
+       __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto error_put;
+       }
+
+       netfs_stat(&netfs_n_rh_write_begin);
+       trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
+
+       /* Expand the request to meet caching requirements and download
+        * preferences.
+        */
+       ractl._nr_pages = folio_nr_pages(folio);
+       netfs_rreq_expand(rreq, &ractl);
+
+       /* We hold the folio locks, so we can drop the references */
+       folio_get(folio);
+       while (readahead_folio(&ractl))
+               ;
+
+       ret = netfs_begin_read(rreq, true);
+       if (ret < 0)
+               goto error;
+
+have_folio:
+       ret = folio_wait_fscache_killable(folio);
+       if (ret < 0)
+               goto error;
+have_folio_no_wait:
+       *_folio = folio;
+       _leave(" = 0");
+       return 0;
+
+error_put:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+error:
+       folio_unlock(folio);
+       folio_put(folio);
+       _leave(" = %d", ret);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_write_begin);
index b7f2c44..b7b0e3d 100644 (file)
@@ -5,6 +5,10 @@
  * Written by David Howells (dhowells@redhat.com)
  */
 
+#include <linux/netfs.h>
+#include <linux/fscache.h>
+#include <trace/events/netfs.h>
+
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
 #define pr_fmt(fmt) "netfs: " fmt
 
 /*
- * read_helper.c
+ * buffered_read.c
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
+
+/*
+ * io.c
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
+
+/*
+ * main.c
  */
 extern unsigned int netfs_debug;
 
+/*
+ * objects.c
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+                                            struct file *file,
+                                            loff_t start, size_t len,
+                                            enum netfs_io_origin origin);
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+                      enum netfs_rreq_ref_trace what);
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
+
+static inline void netfs_see_request(struct netfs_io_request *rreq,
+                                    enum netfs_rreq_ref_trace what)
+{
+       trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
+}
+
 /*
  * stats.c
  */
@@ -55,6 +88,21 @@ static inline void netfs_stat_d(atomic_t *stat)
 #define netfs_stat_d(x) do {} while(0)
 #endif
 
+/*
+ * Miscellaneous functions.
+ */
+static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct fscache_cookie *cookie = ctx->cache;
+
+       return fscache_cookie_valid(cookie) && cookie->cache_priv &&
+               fscache_cookie_enabled(cookie);
+#else
+       return false;
+#endif
+}
+
 /*****************************************************************************/
 /*
  * debug tracing
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
new file mode 100644 (file)
index 0000000..4289258
--- /dev/null
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Clear the unread part of an I/O request.
+ */
+static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
+{
+       struct iov_iter iter;
+
+       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
+                       subreq->start + subreq->transferred,
+                       subreq->len   - subreq->transferred);
+       iov_iter_zero(iov_iter_count(&iter), &iter);
+}
+
+static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
+                                       bool was_async)
+{
+       struct netfs_io_subrequest *subreq = priv;
+
+       netfs_subreq_terminated(subreq, transferred_or_error, was_async);
+}
+
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_from_cache(struct netfs_io_request *rreq,
+                                 struct netfs_io_subrequest *subreq,
+                                 enum netfs_read_from_hole read_hole)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+       struct iov_iter iter;
+
+       netfs_stat(&netfs_n_rh_read);
+       iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+                       subreq->start + subreq->transferred,
+                       subreq->len   - subreq->transferred);
+
+       cres->ops->read(cres, subreq->start, &iter, read_hole,
+                       netfs_cache_read_terminated, subreq);
+}
+
+/*
+ * Fill a subrequest region with zeroes.
+ */
+static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
+                                  struct netfs_io_subrequest *subreq)
+{
+       netfs_stat(&netfs_n_rh_zero);
+       __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+       netfs_subreq_terminated(subreq, 0, false);
+}
+
+/*
+ * Ask the netfs to issue a read request to the server for us.
+ *
+ * The netfs is expected to read from subreq->pos + subreq->transferred to
+ * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
+ * buffer prior to the transferred point as it might clobber dirty data
+ * obtained from the cache.
+ *
+ * Alternatively, the netfs is allowed to indicate one of two things:
+ *
+ * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
+ *   make progress.
+ *
+ * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
+ *   cleared.
+ */
+static void netfs_read_from_server(struct netfs_io_request *rreq,
+                                  struct netfs_io_subrequest *subreq)
+{
+       netfs_stat(&netfs_n_rh_download);
+       rreq->netfs_ops->issue_read(subreq);
+}
+
+/*
+ * Release those waiting.
+ */
+static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
+{
+       trace_netfs_rreq(rreq, netfs_rreq_trace_done);
+       netfs_clear_subrequests(rreq, was_async);
+       netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
+}
+
+/*
+ * Deal with the completion of writing the data to the cache.  We have to clear
+ * the PG_fscache bits on the folios involved and release the caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+                                         bool was_async)
+{
+       struct netfs_io_subrequest *subreq;
+       struct folio *folio;
+       pgoff_t unlocked = 0;
+       bool have_unlocked = false;
+
+       rcu_read_lock();
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+               xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+                       /* We might have multiple writes from the same huge
+                        * folio, but we mustn't unlock a folio more than once.
+                        */
+                       if (have_unlocked && folio_index(folio) <= unlocked)
+                               continue;
+                       unlocked = folio_index(folio);
+                       folio_end_fscache(folio);
+                       have_unlocked = true;
+               }
+       }
+
+       rcu_read_unlock();
+       netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+                                      bool was_async)
+{
+       struct netfs_io_subrequest *subreq = priv;
+       struct netfs_io_request *rreq = subreq->rreq;
+
+       if (IS_ERR_VALUE(transferred_or_error)) {
+               netfs_stat(&netfs_n_rh_write_failed);
+               trace_netfs_failure(rreq, subreq, transferred_or_error,
+                                   netfs_fail_copy_to_cache);
+       } else {
+               netfs_stat(&netfs_n_rh_write_done);
+       }
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+       /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_copy_ops))
+               netfs_rreq_unmark_after_write(rreq, was_async);
+
+       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * Perform any outstanding writes to the cache.  We inherit a ref from the
+ * caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+       struct netfs_io_subrequest *subreq, *next, *p;
+       struct iov_iter iter;
+       int ret;
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+       /* We don't want terminating writes trying to wake us up whilst we're
+        * still going through the list.
+        */
+       atomic_inc(&rreq->nr_copy_ops);
+
+       list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+               if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+                       list_del_init(&subreq->rreq_link);
+                       netfs_put_subrequest(subreq, false,
+                                            netfs_sreq_trace_put_no_copy);
+               }
+       }
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               /* Amalgamate adjacent writes */
+               while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+                       next = list_next_entry(subreq, rreq_link);
+                       if (next->start != subreq->start + subreq->len)
+                               break;
+                       subreq->len += next->len;
+                       list_del_init(&next->rreq_link);
+                       netfs_put_subrequest(next, false,
+                                            netfs_sreq_trace_put_merged);
+               }
+
+               ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+                                              rreq->i_size, true);
+               if (ret < 0) {
+                       trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+                       trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+                       continue;
+               }
+
+               iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
+                               subreq->start, subreq->len);
+
+               atomic_inc(&rreq->nr_copy_ops);
+               netfs_stat(&netfs_n_rh_write);
+               netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+               trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+               cres->ops->write(cres, subreq->start, &iter,
+                                netfs_rreq_copy_terminated, subreq);
+       }
+
+       /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_copy_ops))
+               netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+
+       netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
+{
+       rreq->work.func = netfs_rreq_write_to_cache_work;
+       if (!queue_work(system_unbound_wq, &rreq->work))
+               BUG();
+}
+
+/*
+ * Handle a short read.
+ */
+static void netfs_rreq_short_read(struct netfs_io_request *rreq,
+                                 struct netfs_io_subrequest *subreq)
+{
+       __clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+       __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
+
+       netfs_stat(&netfs_n_rh_short_read);
+       trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
+
+       netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
+       atomic_inc(&rreq->nr_outstanding);
+       if (subreq->source == NETFS_READ_FROM_CACHE)
+               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
+       else
+               netfs_read_from_server(rreq, subreq);
+}
+
+/*
+ * Resubmit any short or failed operations.  Returns true if we got the rreq
+ * ref back.
+ */
+static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       WARN_ON(in_interrupt());
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+
+       /* We don't want terminating submissions trying to wake us up whilst
+        * we're still going through the list.
+        */
+       atomic_inc(&rreq->nr_outstanding);
+
+       __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               if (subreq->error) {
+                       if (subreq->source != NETFS_READ_FROM_CACHE)
+                               break;
+                       subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+                       subreq->error = 0;
+                       netfs_stat(&netfs_n_rh_download_instead);
+                       trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
+                       netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+                       atomic_inc(&rreq->nr_outstanding);
+                       netfs_read_from_server(rreq, subreq);
+               } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
+                       netfs_rreq_short_read(rreq, subreq);
+               }
+       }
+
+       /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_outstanding))
+               return true;
+
+       wake_up_var(&rreq->nr_outstanding);
+       return false;
+}
+
+/*
+ * Check to see if the data read is still valid.
+ */
+static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       if (!rreq->netfs_ops->is_still_valid ||
+           rreq->netfs_ops->is_still_valid(rreq))
+               return;
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               if (subreq->source == NETFS_READ_FROM_CACHE) {
+                       subreq->error = -ESTALE;
+                       __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+               }
+       }
+}
+
+/*
+ * Assess the state of a read request and decide what to do next.
+ *
+ * Note that we could be in an ordinary kernel thread, on a workqueue or in
+ * softirq context at this point.  We inherit a ref from the caller.
+ */
+static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
+{
+       trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
+
+again:
+       netfs_rreq_is_still_valid(rreq);
+
+       if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
+           test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
+               if (netfs_rreq_perform_resubmissions(rreq))
+                       goto again;
+               return;
+       }
+
+       netfs_rreq_unlock_folios(rreq);
+
+       clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+       wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+       if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
+               return netfs_rreq_write_to_cache(rreq);
+
+       netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_work(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+       netfs_rreq_assess(rreq, false);
+}
+
+/*
+ * Handle the completion of all outstanding I/O operations on a read request.
+ * We inherit a ref from the caller.
+ */
+static void netfs_rreq_terminated(struct netfs_io_request *rreq,
+                                 bool was_async)
+{
+       if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
+           was_async) {
+               if (!queue_work(system_unbound_wq, &rreq->work))
+                       BUG();
+       } else {
+               netfs_rreq_assess(rreq, was_async);
+       }
+}
+
+/**
+ * netfs_subreq_terminated - Note the termination of an I/O operation.
+ * @subreq: The I/O request that has terminated.
+ * @transferred_or_error: The amount of data transferred or an error code.
+ * @was_async: The termination was asynchronous
+ *
+ * This tells the read helper that a contributory I/O operation has terminated,
+ * one way or another, and that it should integrate the results.
+ *
+ * The caller indicates in @transferred_or_error the outcome of the operation,
+ * supplying a positive value to indicate the number of bytes transferred, 0 to
+ * indicate a failure to transfer anything that should be retried or a negative
+ * error code.  The helper will look after reissuing I/O operations as
+ * appropriate and writing downloaded data to the cache.
+ *
+ * If @was_async is true, the caller might be running in softirq or interrupt
+ * context and we can't sleep.
+ */
+void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
+                            ssize_t transferred_or_error,
+                            bool was_async)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       int u;
+
+       _enter("[%u]{%llx,%lx},%zd",
+              subreq->debug_index, subreq->start, subreq->flags,
+              transferred_or_error);
+
+       switch (subreq->source) {
+       case NETFS_READ_FROM_CACHE:
+               netfs_stat(&netfs_n_rh_read_done);
+               break;
+       case NETFS_DOWNLOAD_FROM_SERVER:
+               netfs_stat(&netfs_n_rh_download_done);
+               break;
+       default:
+               break;
+       }
+
+       if (IS_ERR_VALUE(transferred_or_error)) {
+               subreq->error = transferred_or_error;
+               trace_netfs_failure(rreq, subreq, transferred_or_error,
+                                   netfs_fail_read);
+               goto failed;
+       }
+
+       if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+                "Subreq overread: R%x[%x] %zd > %zu - %zu",
+                rreq->debug_id, subreq->debug_index,
+                transferred_or_error, subreq->len, subreq->transferred))
+               transferred_or_error = subreq->len - subreq->transferred;
+
+       subreq->error = 0;
+       subreq->transferred += transferred_or_error;
+       if (subreq->transferred < subreq->len)
+               goto incomplete;
+
+complete:
+       __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+       if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+               set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+
+out:
+       trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+       /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+       u = atomic_dec_return(&rreq->nr_outstanding);
+       if (u == 0)
+               netfs_rreq_terminated(rreq, was_async);
+       else if (u == 1)
+               wake_up_var(&rreq->nr_outstanding);
+
+       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+       return;
+
+incomplete:
+       if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
+               netfs_clear_unread(subreq);
+               subreq->transferred = subreq->len;
+               goto complete;
+       }
+
+       if (transferred_or_error == 0) {
+               if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+                       subreq->error = -ENODATA;
+                       goto failed;
+               }
+       } else {
+               __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+       }
+
+       __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+       set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       goto out;
+
+failed:
+       if (subreq->source == NETFS_READ_FROM_CACHE) {
+               netfs_stat(&netfs_n_rh_read_failed);
+               set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       } else {
+               netfs_stat(&netfs_n_rh_download_failed);
+               set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+               rreq->error = subreq->error;
+       }
+       goto out;
+}
+EXPORT_SYMBOL(netfs_subreq_terminated);
+
+static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
+                                                      loff_t i_size)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+       if (cres->ops)
+               return cres->ops->prepare_read(subreq, i_size);
+       if (subreq->start >= rreq->i_size)
+               return NETFS_FILL_WITH_ZEROES;
+       return NETFS_DOWNLOAD_FROM_SERVER;
+}
+
+/*
+ * Work out what sort of subrequest the next one will be.
+ */
+static enum netfs_io_source
+netfs_rreq_prepare_read(struct netfs_io_request *rreq,
+                       struct netfs_io_subrequest *subreq)
+{
+       enum netfs_io_source source;
+
+       _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+
+       source = netfs_cache_prepare_read(subreq, rreq->i_size);
+       if (source == NETFS_INVALID_READ)
+               goto out;
+
+       if (source == NETFS_DOWNLOAD_FROM_SERVER) {
+               /* Call out to the netfs to let it shrink the request to fit
+                * its own I/O sizes and boundaries.  If it shinks it here, it
+                * will be called again to make simultaneous calls; if it wants
+                * to make serial calls, it can indicate a short read and then
+                * we will call it again.
+                */
+               if (subreq->len > rreq->i_size - subreq->start)
+                       subreq->len = rreq->i_size - subreq->start;
+
+               if (rreq->netfs_ops->clamp_length &&
+                   !rreq->netfs_ops->clamp_length(subreq)) {
+                       source = NETFS_INVALID_READ;
+                       goto out;
+               }
+       }
+
+       if (WARN_ON(subreq->len == 0))
+               source = NETFS_INVALID_READ;
+
+out:
+       subreq->source = source;
+       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+       return source;
+}
+
+/*
+ * Slice off a piece of a read request and submit an I/O request for it.
+ */
+static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
+                                   unsigned int *_debug_index)
+{
+       struct netfs_io_subrequest *subreq;
+       enum netfs_io_source source;
+
+       subreq = netfs_alloc_subrequest(rreq);
+       if (!subreq)
+               return false;
+
+       subreq->debug_index     = (*_debug_index)++;
+       subreq->start           = rreq->start + rreq->submitted;
+       subreq->len             = rreq->len   - rreq->submitted;
+
+       _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+       list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+
+       /* Call out to the cache to find out what it can do with the remaining
+        * subset.  It tells us in subreq->flags what it decided should be done
+        * and adjusts subreq->len down if the subset crosses a cache boundary.
+        *
+        * Then when we hand the subset, it can choose to take a subset of that
+        * (the starts must coincide), in which case, we go around the loop
+        * again and ask it to download the next piece.
+        */
+       source = netfs_rreq_prepare_read(rreq, subreq);
+       if (source == NETFS_INVALID_READ)
+               goto subreq_failed;
+
+       atomic_inc(&rreq->nr_outstanding);
+
+       rreq->submitted += subreq->len;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+       switch (source) {
+       case NETFS_FILL_WITH_ZEROES:
+               netfs_fill_with_zeroes(rreq, subreq);
+               break;
+       case NETFS_DOWNLOAD_FROM_SERVER:
+               netfs_read_from_server(rreq, subreq);
+               break;
+       case NETFS_READ_FROM_CACHE:
+               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
+               break;
+       default:
+               BUG();
+       }
+
+       return true;
+
+subreq_failed:
+       rreq->error = subreq->error;
+       netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
+       return false;
+}
+
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+       unsigned int debug_index = 0;
+       int ret;
+
+       _enter("R=%x %llx-%llx",
+              rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+       if (rreq->len == 0) {
+               pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+               return -EIO;
+       }
+
+       INIT_WORK(&rreq->work, netfs_rreq_work);
+
+       if (sync)
+               netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+       /* Chop the read into slices according to what the cache and the netfs
+        * want and submit each one.
+        */
+       atomic_set(&rreq->nr_outstanding, 1);
+       do {
+               if (!netfs_rreq_submit_slice(rreq, &debug_index))
+                       break;
+
+       } while (rreq->submitted < rreq->len);
+
+       if (sync) {
+               /* Keep nr_outstanding incremented so that the ref always belongs to
+                * us, and the service code isn't punted off to a random thread pool to
+                * process.
+                */
+               for (;;) {
+                       wait_var_event(&rreq->nr_outstanding,
+                                      atomic_read(&rreq->nr_outstanding) == 1);
+                       netfs_rreq_assess(rreq, false);
+                       if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+                               break;
+                       cond_resched();
+               }
+
+               ret = rreq->error;
+               if (ret == 0 && rreq->submitted < rreq->len) {
+                       trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+                       ret = -EIO;
+               }
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+       } else {
+               /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+               if (atomic_dec_and_test(&rreq->nr_outstanding))
+                       netfs_rreq_assess(rreq, false);
+               ret = 0;
+       }
+       return ret;
+}
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
new file mode 100644 (file)
index 0000000..0685687
--- /dev/null
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Miscellaneous bits for the netfs support library.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/netfs.h>
+
+MODULE_DESCRIPTION("Network fs support");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+unsigned netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
new file mode 100644 (file)
index 0000000..e86107b
--- /dev/null
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Object lifetime handling and tracing.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Allocate an I/O request and initialise it.
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+                                            struct file *file,
+                                            loff_t start, size_t len,
+                                            enum netfs_io_origin origin)
+{
+       static atomic_t debug_ids;
+       struct inode *inode = file ? file_inode(file) : mapping->host;
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+       struct netfs_io_request *rreq;
+       int ret;
+
+       rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+       if (!rreq)
+               return ERR_PTR(-ENOMEM);
+
+       rreq->start     = start;
+       rreq->len       = len;
+       rreq->origin    = origin;
+       rreq->netfs_ops = ctx->ops;
+       rreq->mapping   = mapping;
+       rreq->inode     = inode;
+       rreq->i_size    = i_size_read(inode);
+       rreq->debug_id  = atomic_inc_return(&debug_ids);
+       INIT_LIST_HEAD(&rreq->subrequests);
+       refcount_set(&rreq->ref, 1);
+       __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+       if (rreq->netfs_ops->init_request) {
+               ret = rreq->netfs_ops->init_request(rreq, file);
+               if (ret < 0) {
+                       kfree(rreq);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       netfs_stat(&netfs_n_rh_rreq);
+       return rreq;
+}
+
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
+{
+       int r;
+
+       __refcount_inc(&rreq->ref, &r);
+       trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
+}
+
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
+{
+       struct netfs_io_subrequest *subreq;
+
+       while (!list_empty(&rreq->subrequests)) {
+               subreq = list_first_entry(&rreq->subrequests,
+                                         struct netfs_io_subrequest, rreq_link);
+               list_del(&subreq->rreq_link);
+               netfs_put_subrequest(subreq, was_async,
+                                    netfs_sreq_trace_put_clear);
+       }
+}
+
+static void netfs_free_request(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+
+       netfs_clear_subrequests(rreq, false);
+       if (rreq->netfs_priv)
+               rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
+       trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+       if (rreq->cache_resources.ops)
+               rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+       kfree(rreq);
+       netfs_stat_d(&netfs_n_rh_rreq);
+}
+
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+                      enum netfs_rreq_ref_trace what)
+{
+       unsigned int debug_id = rreq->debug_id;
+       bool dead;
+       int r;
+
+       dead = __refcount_dec_and_test(&rreq->ref, &r);
+       trace_netfs_rreq_ref(debug_id, r - 1, what);
+       if (dead) {
+               if (was_async) {
+                       rreq->work.func = netfs_free_request;
+                       if (!queue_work(system_unbound_wq, &rreq->work))
+                               BUG();
+               } else {
+                       netfs_free_request(&rreq->work);
+               }
+       }
+}
+
+/*
+ * Allocate and partially initialise an I/O request structure.
+ */
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
+       if (subreq) {
+               INIT_LIST_HEAD(&subreq->rreq_link);
+               refcount_set(&subreq->ref, 2);
+               subreq->rreq = rreq;
+               netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+               netfs_stat(&netfs_n_rh_sreq);
+       }
+
+       return subreq;
+}
+
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+                         enum netfs_sreq_ref_trace what)
+{
+       int r;
+
+       __refcount_inc(&subreq->ref, &r);
+       trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
+                            what);
+}
+
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
+                                 bool was_async)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_free);
+       kfree(subreq);
+       netfs_stat_d(&netfs_n_rh_sreq);
+       netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
+}
+
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
+                         enum netfs_sreq_ref_trace what)
+{
+       unsigned int debug_index = subreq->debug_index;
+       unsigned int debug_id = subreq->rreq->debug_id;
+       bool dead;
+       int r;
+
+       dead = __refcount_dec_and_test(&subreq->ref, &r);
+       trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
+       if (dead)
+               netfs_free_subrequest(subreq, was_async);
+}
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
deleted file mode 100644 (file)
index 501da99..0000000
+++ /dev/null
@@ -1,1205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Network filesystem high-level read support.
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/sched/mm.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/netfs.h>
-#include "internal.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/netfs.h>
-
-MODULE_DESCRIPTION("Network fs support");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned netfs_debug;
-module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
-
-static void netfs_rreq_work(struct work_struct *);
-static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);
-
-static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
-                                bool was_async)
-{
-       if (refcount_dec_and_test(&subreq->usage))
-               __netfs_put_subrequest(subreq, was_async);
-}
-
-static struct netfs_read_request *netfs_alloc_read_request(
-       const struct netfs_read_request_ops *ops, void *netfs_priv,
-       struct file *file)
-{
-       static atomic_t debug_ids;
-       struct netfs_read_request *rreq;
-
-       rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
-       if (rreq) {
-               rreq->netfs_ops = ops;
-               rreq->netfs_priv = netfs_priv;
-               rreq->inode     = file_inode(file);
-               rreq->i_size    = i_size_read(rreq->inode);
-               rreq->debug_id  = atomic_inc_return(&debug_ids);
-               INIT_LIST_HEAD(&rreq->subrequests);
-               INIT_WORK(&rreq->work, netfs_rreq_work);
-               refcount_set(&rreq->usage, 1);
-               __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-               if (ops->init_rreq)
-                       ops->init_rreq(rreq, file);
-               netfs_stat(&netfs_n_rh_rreq);
-       }
-
-       return rreq;
-}
-
-static void netfs_get_read_request(struct netfs_read_request *rreq)
-{
-       refcount_inc(&rreq->usage);
-}
-
-static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
-                                    bool was_async)
-{
-       struct netfs_read_subrequest *subreq;
-
-       while (!list_empty(&rreq->subrequests)) {
-               subreq = list_first_entry(&rreq->subrequests,
-                                         struct netfs_read_subrequest, rreq_link);
-               list_del(&subreq->rreq_link);
-               netfs_put_subrequest(subreq, was_async);
-       }
-}
-
-static void netfs_free_read_request(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-       netfs_rreq_clear_subreqs(rreq, false);
-       if (rreq->netfs_priv)
-               rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
-       trace_netfs_rreq(rreq, netfs_rreq_trace_free);
-       if (rreq->cache_resources.ops)
-               rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
-       kfree(rreq);
-       netfs_stat_d(&netfs_n_rh_rreq);
-}
-
-static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
-{
-       if (refcount_dec_and_test(&rreq->usage)) {
-               if (was_async) {
-                       rreq->work.func = netfs_free_read_request;
-                       if (!queue_work(system_unbound_wq, &rreq->work))
-                               BUG();
-               } else {
-                       netfs_free_read_request(&rreq->work);
-               }
-       }
-}
-
-/*
- * Allocate and partially initialise an I/O request structure.
- */
-static struct netfs_read_subrequest *netfs_alloc_subrequest(
-       struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
-       if (subreq) {
-               INIT_LIST_HEAD(&subreq->rreq_link);
-               refcount_set(&subreq->usage, 2);
-               subreq->rreq = rreq;
-               netfs_get_read_request(rreq);
-               netfs_stat(&netfs_n_rh_sreq);
-       }
-
-       return subreq;
-}
-
-static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
-{
-       refcount_inc(&subreq->usage);
-}
-
-static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
-                                  bool was_async)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_free);
-       kfree(subreq);
-       netfs_stat_d(&netfs_n_rh_sreq);
-       netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Clear the unread part of an I/O request.
- */
-static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
-{
-       struct iov_iter iter;
-
-       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-       iov_iter_zero(iov_iter_count(&iter), &iter);
-}
-
-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
-                                       bool was_async)
-{
-       struct netfs_read_subrequest *subreq = priv;
-
-       netfs_subreq_terminated(subreq, transferred_or_error, was_async);
-}
-
-/*
- * Issue a read against the cache.
- * - Eats the caller's ref on subreq.
- */
-static void netfs_read_from_cache(struct netfs_read_request *rreq,
-                                 struct netfs_read_subrequest *subreq,
-                                 enum netfs_read_from_hole read_hole)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct iov_iter iter;
-
-       netfs_stat(&netfs_n_rh_read);
-       iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-
-       cres->ops->read(cres, subreq->start, &iter, read_hole,
-                       netfs_cache_read_terminated, subreq);
-}
-
-/*
- * Fill a subrequest region with zeroes.
- */
-static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
-                                  struct netfs_read_subrequest *subreq)
-{
-       netfs_stat(&netfs_n_rh_zero);
-       __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-       netfs_subreq_terminated(subreq, 0, false);
-}
-
-/*
- * Ask the netfs to issue a read request to the server for us.
- *
- * The netfs is expected to read from subreq->pos + subreq->transferred to
- * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
- * buffer prior to the transferred point as it might clobber dirty data
- * obtained from the cache.
- *
- * Alternatively, the netfs is allowed to indicate one of two things:
- *
- * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
- *   make progress.
- *
- * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
- *   cleared.
- */
-static void netfs_read_from_server(struct netfs_read_request *rreq,
-                                  struct netfs_read_subrequest *subreq)
-{
-       netfs_stat(&netfs_n_rh_download);
-       rreq->netfs_ops->issue_op(subreq);
-}
-
-/*
- * Release those waiting.
- */
-static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
-{
-       trace_netfs_rreq(rreq, netfs_rreq_trace_done);
-       netfs_rreq_clear_subreqs(rreq, was_async);
-       netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Deal with the completion of writing the data to the cache.  We have to clear
- * the PG_fscache bits on the folios involved and release the caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
-                                         bool was_async)
-{
-       struct netfs_read_subrequest *subreq;
-       struct folio *folio;
-       pgoff_t unlocked = 0;
-       bool have_unlocked = false;
-
-       rcu_read_lock();
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
-               xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
-                       /* We might have multiple writes from the same huge
-                        * folio, but we mustn't unlock a folio more than once.
-                        */
-                       if (have_unlocked && folio_index(folio) <= unlocked)
-                               continue;
-                       unlocked = folio_index(folio);
-                       folio_end_fscache(folio);
-                       have_unlocked = true;
-               }
-       }
-
-       rcu_read_unlock();
-       netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
-                                      bool was_async)
-{
-       struct netfs_read_subrequest *subreq = priv;
-       struct netfs_read_request *rreq = subreq->rreq;
-
-       if (IS_ERR_VALUE(transferred_or_error)) {
-               netfs_stat(&netfs_n_rh_write_failed);
-               trace_netfs_failure(rreq, subreq, transferred_or_error,
-                                   netfs_fail_copy_to_cache);
-       } else {
-               netfs_stat(&netfs_n_rh_write_done);
-       }
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
-       /* If we decrement nr_wr_ops to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_wr_ops))
-               netfs_rreq_unmark_after_write(rreq, was_async);
-
-       netfs_put_subrequest(subreq, was_async);
-}
-
-/*
- * Perform any outstanding writes to the cache.  We inherit a ref from the
- * caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct netfs_read_subrequest *subreq, *next, *p;
-       struct iov_iter iter;
-       int ret;
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_write);
-
-       /* We don't want terminating writes trying to wake us up whilst we're
-        * still going through the list.
-        */
-       atomic_inc(&rreq->nr_wr_ops);
-
-       list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
-               if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
-                       list_del_init(&subreq->rreq_link);
-                       netfs_put_subrequest(subreq, false);
-               }
-       }
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               /* Amalgamate adjacent writes */
-               while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-                       next = list_next_entry(subreq, rreq_link);
-                       if (next->start != subreq->start + subreq->len)
-                               break;
-                       subreq->len += next->len;
-                       list_del_init(&next->rreq_link);
-                       netfs_put_subrequest(next, false);
-               }
-
-               ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
-                                              rreq->i_size, true);
-               if (ret < 0) {
-                       trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
-                       trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
-                       continue;
-               }
-
-               iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
-                               subreq->start, subreq->len);
-
-               atomic_inc(&rreq->nr_wr_ops);
-               netfs_stat(&netfs_n_rh_write);
-               netfs_get_read_subrequest(subreq);
-               trace_netfs_sreq(subreq, netfs_sreq_trace_write);
-               cres->ops->write(cres, subreq->start, &iter,
-                                netfs_rreq_copy_terminated, subreq);
-       }
-
-       /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_wr_ops))
-               netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-
-       netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
-{
-       rreq->work.func = netfs_rreq_write_to_cache_work;
-       if (!queue_work(system_unbound_wq, &rreq->work))
-               BUG();
-}
-
-/*
- * Unlock the folios in a read operation.  We need to set PG_fscache on any
- * folios we're going to write back before we unlock them.
- */
-static void netfs_rreq_unlock(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-       struct folio *folio;
-       unsigned int iopos, account = 0;
-       pgoff_t start_page = rreq->start / PAGE_SIZE;
-       pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
-       bool subreq_failed = false;
-
-       XA_STATE(xas, &rreq->mapping->i_pages, start_page);
-
-       if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
-               __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-               list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-                       __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
-               }
-       }
-
-       /* Walk through the pagecache and the I/O request lists simultaneously.
-        * We may have a mixture of cached and uncached sections and we only
-        * really want to write out the uncached sections.  This is slightly
-        * complicated by the possibility that we might have huge pages with a
-        * mixture inside.
-        */
-       subreq = list_first_entry(&rreq->subrequests,
-                                 struct netfs_read_subrequest, rreq_link);
-       iopos = 0;
-       subreq_failed = (subreq->error < 0);
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
-
-       rcu_read_lock();
-       xas_for_each(&xas, folio, last_page) {
-               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-               unsigned int pgend = pgpos + folio_size(folio);
-               bool pg_failed = false;
-
-               for (;;) {
-                       if (!subreq) {
-                               pg_failed = true;
-                               break;
-                       }
-                       if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
-                               folio_start_fscache(folio);
-                       pg_failed |= subreq_failed;
-                       if (pgend < iopos + subreq->len)
-                               break;
-
-                       account += subreq->transferred;
-                       iopos += subreq->len;
-                       if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-                               subreq = list_next_entry(subreq, rreq_link);
-                               subreq_failed = (subreq->error < 0);
-                       } else {
-                               subreq = NULL;
-                               subreq_failed = false;
-                       }
-                       if (pgend == iopos)
-                               break;
-               }
-
-               if (!pg_failed) {
-                       flush_dcache_folio(folio);
-                       folio_mark_uptodate(folio);
-               }
-
-               if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
-                       if (folio_index(folio) == rreq->no_unlock_folio &&
-                           test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
-                               _debug("no unlock");
-                       else
-                               folio_unlock(folio);
-               }
-       }
-       rcu_read_unlock();
-
-       task_io_account_read(account);
-       if (rreq->netfs_ops->done)
-               rreq->netfs_ops->done(rreq);
-}
-
-/*
- * Handle a short read.
- */
-static void netfs_rreq_short_read(struct netfs_read_request *rreq,
-                                 struct netfs_read_subrequest *subreq)
-{
-       __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
-       __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
-
-       netfs_stat(&netfs_n_rh_short_read);
-       trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
-
-       netfs_get_read_subrequest(subreq);
-       atomic_inc(&rreq->nr_rd_ops);
-       if (subreq->source == NETFS_READ_FROM_CACHE)
-               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
-       else
-               netfs_read_from_server(rreq, subreq);
-}
-
-/*
- * Resubmit any short or failed operations.  Returns true if we got the rreq
- * ref back.
- */
-static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       WARN_ON(in_interrupt());
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
-
-       /* We don't want terminating submissions trying to wake us up whilst
-        * we're still going through the list.
-        */
-       atomic_inc(&rreq->nr_rd_ops);
-
-       __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               if (subreq->error) {
-                       if (subreq->source != NETFS_READ_FROM_CACHE)
-                               break;
-                       subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
-                       subreq->error = 0;
-                       netfs_stat(&netfs_n_rh_download_instead);
-                       trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
-                       netfs_get_read_subrequest(subreq);
-                       atomic_inc(&rreq->nr_rd_ops);
-                       netfs_read_from_server(rreq, subreq);
-               } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
-                       netfs_rreq_short_read(rreq, subreq);
-               }
-       }
-
-       /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_rd_ops))
-               return true;
-
-       wake_up_var(&rreq->nr_rd_ops);
-       return false;
-}
-
-/*
- * Check to see if the data read is still valid.
- */
-static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       if (!rreq->netfs_ops->is_still_valid ||
-           rreq->netfs_ops->is_still_valid(rreq))
-               return;
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               if (subreq->source == NETFS_READ_FROM_CACHE) {
-                       subreq->error = -ESTALE;
-                       __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-               }
-       }
-}
-
-/*
- * Assess the state of a read request and decide what to do next.
- *
- * Note that we could be in an ordinary kernel thread, on a workqueue or in
- * softirq context at this point.  We inherit a ref from the caller.
- */
-static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
-{
-       trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
-
-again:
-       netfs_rreq_is_still_valid(rreq);
-
-       if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
-           test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
-               if (netfs_rreq_perform_resubmissions(rreq))
-                       goto again;
-               return;
-       }
-
-       netfs_rreq_unlock(rreq);
-
-       clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-       wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
-
-       if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
-               return netfs_rreq_write_to_cache(rreq);
-
-       netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_work(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-       netfs_rreq_assess(rreq, false);
-}
-
-/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
- */
-static void netfs_rreq_terminated(struct netfs_read_request *rreq,
-                                 bool was_async)
-{
-       if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
-           was_async) {
-               if (!queue_work(system_unbound_wq, &rreq->work))
-                       BUG();
-       } else {
-               netfs_rreq_assess(rreq, was_async);
-       }
-}
-
-/**
- * netfs_subreq_terminated - Note the termination of an I/O operation.
- * @subreq: The I/O request that has terminated.
- * @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
- *
- * This tells the read helper that a contributory I/O operation has terminated,
- * one way or another, and that it should integrate the results.
- *
- * The caller indicates in @transferred_or_error the outcome of the operation,
- * supplying a positive value to indicate the number of bytes transferred, 0 to
- * indicate a failure to transfer anything that should be retried or a negative
- * error code.  The helper will look after reissuing I/O operations as
- * appropriate and writing downloaded data to the cache.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- */
-void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
-                            ssize_t transferred_or_error,
-                            bool was_async)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-       int u;
-
-       _enter("[%u]{%llx,%lx},%zd",
-              subreq->debug_index, subreq->start, subreq->flags,
-              transferred_or_error);
-
-       switch (subreq->source) {
-       case NETFS_READ_FROM_CACHE:
-               netfs_stat(&netfs_n_rh_read_done);
-               break;
-       case NETFS_DOWNLOAD_FROM_SERVER:
-               netfs_stat(&netfs_n_rh_download_done);
-               break;
-       default:
-               break;
-       }
-
-       if (IS_ERR_VALUE(transferred_or_error)) {
-               subreq->error = transferred_or_error;
-               trace_netfs_failure(rreq, subreq, transferred_or_error,
-                                   netfs_fail_read);
-               goto failed;
-       }
-
-       if (WARN(transferred_or_error > subreq->len - subreq->transferred,
-                "Subreq overread: R%x[%x] %zd > %zu - %zu",
-                rreq->debug_id, subreq->debug_index,
-                transferred_or_error, subreq->len, subreq->transferred))
-               transferred_or_error = subreq->len - subreq->transferred;
-
-       subreq->error = 0;
-       subreq->transferred += transferred_or_error;
-       if (subreq->transferred < subreq->len)
-               goto incomplete;
-
-complete:
-       __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-       if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
-               set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-
-out:
-       trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
-       /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
-       u = atomic_dec_return(&rreq->nr_rd_ops);
-       if (u == 0)
-               netfs_rreq_terminated(rreq, was_async);
-       else if (u == 1)
-               wake_up_var(&rreq->nr_rd_ops);
-
-       netfs_put_subrequest(subreq, was_async);
-       return;
-
-incomplete:
-       if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
-               netfs_clear_unread(subreq);
-               subreq->transferred = subreq->len;
-               goto complete;
-       }
-
-       if (transferred_or_error == 0) {
-               if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
-                       subreq->error = -ENODATA;
-                       goto failed;
-               }
-       } else {
-               __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-       }
-
-       __set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
-       set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       goto out;
-
-failed:
-       if (subreq->source == NETFS_READ_FROM_CACHE) {
-               netfs_stat(&netfs_n_rh_read_failed);
-               set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       } else {
-               netfs_stat(&netfs_n_rh_download_failed);
-               set_bit(NETFS_RREQ_FAILED, &rreq->flags);
-               rreq->error = subreq->error;
-       }
-       goto out;
-}
-EXPORT_SYMBOL(netfs_subreq_terminated);
-
-static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
-                                                      loff_t i_size)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-
-       if (cres->ops)
-               return cres->ops->prepare_read(subreq, i_size);
-       if (subreq->start >= rreq->i_size)
-               return NETFS_FILL_WITH_ZEROES;
-       return NETFS_DOWNLOAD_FROM_SERVER;
-}
-
-/*
- * Work out what sort of subrequest the next one will be.
- */
-static enum netfs_read_source
-netfs_rreq_prepare_read(struct netfs_read_request *rreq,
-                       struct netfs_read_subrequest *subreq)
-{
-       enum netfs_read_source source;
-
-       _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
-
-       source = netfs_cache_prepare_read(subreq, rreq->i_size);
-       if (source == NETFS_INVALID_READ)
-               goto out;
-
-       if (source == NETFS_DOWNLOAD_FROM_SERVER) {
-               /* Call out to the netfs to let it shrink the request to fit
-                * its own I/O sizes and boundaries.  If it shinks it here, it
-                * will be called again to make simultaneous calls; if it wants
-                * to make serial calls, it can indicate a short read and then
-                * we will call it again.
-                */
-               if (subreq->len > rreq->i_size - subreq->start)
-                       subreq->len = rreq->i_size - subreq->start;
-
-               if (rreq->netfs_ops->clamp_length &&
-                   !rreq->netfs_ops->clamp_length(subreq)) {
-                       source = NETFS_INVALID_READ;
-                       goto out;
-               }
-       }
-
-       if (WARN_ON(subreq->len == 0))
-               source = NETFS_INVALID_READ;
-
-out:
-       subreq->source = source;
-       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
-       return source;
-}
-
-/*
- * Slice off a piece of a read request and submit an I/O request for it.
- */
-static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
-                                   unsigned int *_debug_index)
-{
-       struct netfs_read_subrequest *subreq;
-       enum netfs_read_source source;
-
-       subreq = netfs_alloc_subrequest(rreq);
-       if (!subreq)
-               return false;
-
-       subreq->debug_index     = (*_debug_index)++;
-       subreq->start           = rreq->start + rreq->submitted;
-       subreq->len             = rreq->len   - rreq->submitted;
-
-       _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
-       list_add_tail(&subreq->rreq_link, &rreq->subrequests);
-
-       /* Call out to the cache to find out what it can do with the remaining
-        * subset.  It tells us in subreq->flags what it decided should be done
-        * and adjusts subreq->len down if the subset crosses a cache boundary.
-        *
-        * Then when we hand the subset, it can choose to take a subset of that
-        * (the starts must coincide), in which case, we go around the loop
-        * again and ask it to download the next piece.
-        */
-       source = netfs_rreq_prepare_read(rreq, subreq);
-       if (source == NETFS_INVALID_READ)
-               goto subreq_failed;
-
-       atomic_inc(&rreq->nr_rd_ops);
-
-       rreq->submitted += subreq->len;
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
-       switch (source) {
-       case NETFS_FILL_WITH_ZEROES:
-               netfs_fill_with_zeroes(rreq, subreq);
-               break;
-       case NETFS_DOWNLOAD_FROM_SERVER:
-               netfs_read_from_server(rreq, subreq);
-               break;
-       case NETFS_READ_FROM_CACHE:
-               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
-               break;
-       default:
-               BUG();
-       }
-
-       return true;
-
-subreq_failed:
-       rreq->error = subreq->error;
-       netfs_put_subrequest(subreq, false);
-       return false;
-}
-
-static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
-                                        loff_t *_start, size_t *_len, loff_t i_size)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-
-       if (cres->ops && cres->ops->expand_readahead)
-               cres->ops->expand_readahead(cres, _start, _len, i_size);
-}
-
-static void netfs_rreq_expand(struct netfs_read_request *rreq,
-                             struct readahead_control *ractl)
-{
-       /* Give the cache a chance to change the request parameters.  The
-        * resultant request must contain the original region.
-        */
-       netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
-
-       /* Give the netfs a chance to change the request parameters.  The
-        * resultant request must contain the original region.
-        */
-       if (rreq->netfs_ops->expand_readahead)
-               rreq->netfs_ops->expand_readahead(rreq);
-
-       /* Expand the request if the cache wants it to start earlier.  Note
-        * that the expansion may get further extended if the VM wishes to
-        * insert THPs and the preferred start and/or end wind up in the middle
-        * of THPs.
-        *
-        * If this is the case, however, the THP size should be an integer
-        * multiple of the cache granule size, so we get a whole number of
-        * granules to deal with.
-        */
-       if (rreq->start  != readahead_pos(ractl) ||
-           rreq->len != readahead_length(ractl)) {
-               readahead_expand(ractl, rreq->start, rreq->len);
-               rreq->start  = readahead_pos(ractl);
-               rreq->len = readahead_length(ractl);
-
-               trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
-                                netfs_read_trace_expanded);
-       }
-}
-
-/**
- * netfs_readahead - Helper to manage a read request
- * @ractl: The description of the readahead request
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readahead request by drawing data from the cache if possible, or
- * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
- * requests from different sources will get munged together.  If necessary, the
- * readahead window can be expanded in either direction to a more convenient
- * alighment for RPC efficiency or to make storage in the cache feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.  It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-void netfs_readahead(struct readahead_control *ractl,
-                    const struct netfs_read_request_ops *ops,
-                    void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       unsigned int debug_index = 0;
-       int ret;
-
-       _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
-
-       if (readahead_count(ractl) == 0)
-               goto cleanup;
-
-       rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
-       if (!rreq)
-               goto cleanup;
-       rreq->mapping   = ractl->mapping;
-       rreq->start     = readahead_pos(ractl);
-       rreq->len       = readahead_length(ractl);
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto cleanup_free;
-       }
-
-       netfs_stat(&netfs_n_rh_readahead);
-       trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
-                        netfs_read_trace_readahead);
-
-       netfs_rreq_expand(rreq, ractl);
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Drop the refs on the folios here rather than in the cache or
-        * filesystem.  The locks will be dropped in netfs_rreq_unlock().
-        */
-       while (readahead_folio(ractl))
-               ;
-
-       /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_rd_ops))
-               netfs_rreq_assess(rreq, false);
-       return;
-
-cleanup_free:
-       netfs_put_read_request(rreq, false);
-       return;
-cleanup:
-       if (netfs_priv)
-               ops->cleanup(ractl->mapping, netfs_priv);
-       return;
-}
-EXPORT_SYMBOL(netfs_readahead);
-
-/**
- * netfs_readpage - Helper to manage a readpage request
- * @file: The file to read from
- * @folio: The folio to read
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readpage request by drawing data from the cache if possible, or the
- * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
- * from different sources will get munged together.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.  It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_readpage(struct file *file,
-                  struct folio *folio,
-                  const struct netfs_read_request_ops *ops,
-                  void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       unsigned int debug_index = 0;
-       int ret;
-
-       _enter("%lx", folio_index(folio));
-
-       rreq = netfs_alloc_read_request(ops, netfs_priv, file);
-       if (!rreq) {
-               if (netfs_priv)
-                       ops->cleanup(folio_file_mapping(folio), netfs_priv);
-               folio_unlock(folio);
-               return -ENOMEM;
-       }
-       rreq->mapping   = folio_file_mapping(folio);
-       rreq->start     = folio_file_pos(folio);
-       rreq->len       = folio_size(folio);
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
-                       folio_unlock(folio);
-                       goto out;
-               }
-       }
-
-       netfs_stat(&netfs_n_rh_readpage);
-       trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
-
-       netfs_get_read_request(rreq);
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
-        * the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       do {
-               wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
-               netfs_rreq_assess(rreq, false);
-       } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
-               ret = -EIO;
-       }
-out:
-       netfs_put_read_request(rreq, false);
-       return ret;
-}
-EXPORT_SYMBOL(netfs_readpage);
-
-/*
- * Prepare a folio for writing without reading first
- * @folio: The folio being prepared
- * @pos: starting position for the write
- * @len: length of write
- *
- * In some cases, write_begin doesn't need to read at all:
- * - full folio write
- * - write that lies in a folio that is completely beyond EOF
- * - write that covers the folio from start to EOF or beyond it
- *
- * If any of these criteria are met, then zero out the unwritten parts
- * of the folio and return true. Otherwise, return false.
- */
-static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
-{
-       struct inode *inode = folio_inode(folio);
-       loff_t i_size = i_size_read(inode);
-       size_t offset = offset_in_folio(folio, pos);
-
-       /* Full folio write */
-       if (offset == 0 && len >= folio_size(folio))
-               return true;
-
-       /* pos beyond last folio in the file */
-       if (pos - offset >= i_size)
-               goto zero_out;
-
-       /* Write that covers from the start of the folio to EOF or beyond */
-       if (offset == 0 && (pos + len) >= i_size)
-               goto zero_out;
-
-       return false;
-zero_out:
-       zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
-       return true;
-}
-
-/**
- * netfs_write_begin - Helper to prepare for writing
- * @file: The file to read from
- * @mapping: The mapping to read from
- * @pos: File position at which the write will begin
- * @len: The length of the write (may extend beyond the end of the folio chosen)
- * @aop_flags: AOP_* flags
- * @_folio: Where to put the resultant folio
- * @_fsdata: Place for the netfs to store a cookie
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Pre-read data for a write-begin request by drawing data from the cache if
- * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
- * Multiple I/O requests from different sources will get munged together.  If
- * necessary, the readahead window can be expanded in either direction to a
- * more convenient alighment for RPC efficiency or to make storage in the cache
- * feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.
- *
- * The check_write_begin() operation can be provided to check for and flush
- * conflicting writes once the folio is grabbed and locked.  It is passed a
- * pointer to the fsdata cookie that gets returned to the VM to be passed to
- * write_end.  It is permitted to sleep.  It should return 0 if the request
- * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
- * be regot; or return an error.
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_write_begin(struct file *file, struct address_space *mapping,
-                     loff_t pos, unsigned int len, unsigned int aop_flags,
-                     struct folio **_folio, void **_fsdata,
-                     const struct netfs_read_request_ops *ops,
-                     void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       struct folio *folio;
-       struct inode *inode = file_inode(file);
-       unsigned int debug_index = 0, fgp_flags;
-       pgoff_t index = pos >> PAGE_SHIFT;
-       int ret;
-
-       DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
-
-retry:
-       fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
-       if (aop_flags & AOP_FLAG_NOFS)
-               fgp_flags |= FGP_NOFS;
-       folio = __filemap_get_folio(mapping, index, fgp_flags,
-                                   mapping_gfp_mask(mapping));
-       if (!folio)
-               return -ENOMEM;
-
-       if (ops->check_write_begin) {
-               /* Allow the netfs (eg. ceph) to flush conflicts. */
-               ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
-               if (ret < 0) {
-                       trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
-                       if (ret == -EAGAIN)
-                               goto retry;
-                       goto error;
-               }
-       }
-
-       if (folio_test_uptodate(folio))
-               goto have_folio;
-
-       /* If the page is beyond the EOF, we want to clear it - unless it's
-        * within the cache granule containing the EOF, in which case we need
-        * to preload the granule.
-        */
-       if (!ops->is_cache_enabled(inode) &&
-           netfs_skip_folio_read(folio, pos, len)) {
-               netfs_stat(&netfs_n_rh_write_zskip);
-               goto have_folio_no_wait;
-       }
-
-       ret = -ENOMEM;
-       rreq = netfs_alloc_read_request(ops, netfs_priv, file);
-       if (!rreq)
-               goto error;
-       rreq->mapping           = folio_file_mapping(folio);
-       rreq->start             = folio_file_pos(folio);
-       rreq->len               = folio_size(folio);
-       rreq->no_unlock_folio   = folio_index(folio);
-       __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
-       netfs_priv = NULL;
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto error_put;
-       }
-
-       netfs_stat(&netfs_n_rh_write_begin);
-       trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
-
-       /* Expand the request to meet caching requirements and download
-        * preferences.
-        */
-       ractl._nr_pages = folio_nr_pages(folio);
-       netfs_rreq_expand(rreq, &ractl);
-       netfs_get_read_request(rreq);
-
-       /* We hold the folio locks, so we can drop the references */
-       folio_get(folio);
-       while (readahead_folio(&ractl))
-               ;
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
-        * the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       for (;;) {
-               wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
-               netfs_rreq_assess(rreq, false);
-               if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                       break;
-               cond_resched();
-       }
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
-               ret = -EIO;
-       }
-       netfs_put_read_request(rreq, false);
-       if (ret < 0)
-               goto error;
-
-have_folio:
-       ret = folio_wait_fscache_killable(folio);
-       if (ret < 0)
-               goto error;
-have_folio_no_wait:
-       if (netfs_priv)
-               ops->cleanup(mapping, netfs_priv);
-       *_folio = folio;
-       _leave(" = 0");
-       return 0;
-
-error_put:
-       netfs_put_read_request(rreq, false);
-error:
-       folio_unlock(folio);
-       folio_put(folio);
-       if (netfs_priv)
-               ops->cleanup(mapping, netfs_priv);
-       _leave(" = %d", ret);
-       return ret;
-}
-EXPORT_SYMBOL(netfs_write_begin);
index 9ae538c..5510a7a 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/export.h>
 #include <linux/seq_file.h>
-#include <linux/netfs.h>
 #include "internal.h"
 
 atomic_t netfs_n_rh_readahead;
index 4dee53c..f73c09a 100644 (file)
@@ -238,14 +238,6 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
        }
 }
 
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * Fallback page reading interface.
  */
index 66bdaa2..ca611ac 100644 (file)
 #include "page.h"
 #include "btnode.h"
 
+
+/**
+ * nilfs_init_btnc_inode - initialize B-tree node cache inode
+ * @btnc_inode: inode to be initialized
+ *
+ * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
+ */
+void nilfs_init_btnc_inode(struct inode *btnc_inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
+
+       btnc_inode->i_mode = S_IFREG;
+       ii->i_flags = 0;
+       memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
+       mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+}
+
 void nilfs_btnode_cache_clear(struct address_space *btnc)
 {
        invalidate_mapping_pages(btnc, 0, -1);
@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
 struct buffer_head *
 nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 {
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        struct buffer_head *bh;
 
        bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                              struct buffer_head **pbh, sector_t *submit_ptr)
 {
        struct buffer_head *bh;
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        struct page *page;
        int err;
 
@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
 {
        struct buffer_head *obh, *nbh;
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        int err;
 
index 1166365..bd5544e 100644 (file)
@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
+void nilfs_init_btnc_inode(struct inode *btnc_inode);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                                              __u64 blocknr);
index 3594eab..f544c22 100644 (file)
@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
                                     __u64 ptr, struct buffer_head **bhp)
 {
-       struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btnc = btnc_inode->i_mapping;
        struct buffer_head *bh;
 
        bh = nilfs_btnode_create_block(btnc, ptr);
@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
                                   struct buffer_head **bhp,
                                   const struct nilfs_btree_readahead_info *ra)
 {
-       struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btnc = btnc_inode->i_mapping;
        struct buffer_head *bh, *ra_bh;
        sector_t submit_ptr = 0;
        int ret;
@@ -1741,6 +1743,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
                dat = nilfs_bmap_get_dat(btree);
        }
 
+       ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
+       if (ret < 0)
+               return ret;
+
        ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
        if (ret < 0)
                return ret;
@@ -1913,7 +1919,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
                path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
                path[level].bp_ctxt.bh = path[level].bp_bh;
                ret = nilfs_btnode_prepare_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                if (ret < 0) {
                        nilfs_dat_abort_update(dat,
@@ -1939,7 +1945,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
 
        if (buffer_nilfs_node(path[level].bp_bh)) {
                nilfs_btnode_commit_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                path[level].bp_bh = path[level].bp_ctxt.bh;
        }
@@ -1958,7 +1964,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
                               &path[level].bp_newreq.bpr_req);
        if (buffer_nilfs_node(path[level].bp_bh))
                nilfs_btnode_abort_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
 }
 
@@ -2134,7 +2140,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
                                             struct list_head *listp)
 {
-       struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btcache = btnc_inode->i_mapping;
        struct list_head lists[NILFS_BTREE_LEVEL_MAX];
        struct pagevec pvec;
        struct buffer_head *bh, *head;
@@ -2188,12 +2195,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
                path[level].bp_ctxt.newkey = blocknr;
                path[level].bp_ctxt.bh = *bh;
                ret = nilfs_btnode_prepare_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                if (ret < 0)
                        return ret;
                nilfs_btnode_commit_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                *bh = path[level].bp_ctxt.bh;
        }
@@ -2398,6 +2405,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
 
        if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
                ret = -EIO;
+       else
+               ret = nilfs_attach_btree_node_cache(
+                       &NILFS_BMAP_I(bmap)->vfs_inode);
+
        return ret;
 }
 
index dc51d3b..3b55e23 100644 (file)
@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
        di = NILFS_DAT_I(dat);
        lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
        nilfs_palloc_setup_cache(dat, &di->palloc_cache);
-       nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+       err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+       if (err)
+               goto failed;
 
        err = nilfs_read_inode_common(dat, raw_inode);
        if (err)
index a8f5315..04fdd42 100644 (file)
@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
                                   __u64 vbn, struct buffer_head **out_bh)
 {
+       struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
        int ret;
 
-       ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
+       ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
                                        vbn ? : pbn, pbn, REQ_OP_READ, 0,
                                        out_bh, &pbn);
        if (ret == -EEXIST) /* internal code (cache hit) */
@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
        ii->i_flags = 0;
        nilfs_bmap_init_gc(ii->i_bmap);
 
-       return 0;
+       return nilfs_attach_btree_node_cache(inode);
 }
 
 /**
@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+               nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
 }
index 476a4a6..6045cea 100644 (file)
  * @cno: checkpoint number
  * @root: pointer on NILFS root object (mounted checkpoint)
  * @for_gc: inode for GC flag
+ * @for_btnc: inode for B-tree node cache flag
+ * @for_shadow: inode for shadowed page cache flag
  */
 struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
-       int for_gc;
+       bool for_gc;
+       bool for_btnc;
+       bool for_shadow;
 };
 
 static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -312,7 +316,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
                                     unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -525,6 +530,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
                return 0;
 
        ii = NILFS_I(inode);
+       if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
+               if (!args->for_btnc)
+                       return 0;
+       } else if (args->for_btnc) {
+               return 0;
+       }
+       if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
+               if (!args->for_shadow)
+                       return 0;
+       } else if (args->for_shadow) {
+               return 0;
+       }
+
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;
 
@@ -536,15 +554,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
        struct nilfs_iget_args *args = opaque;
 
        inode->i_ino = args->ino;
-       if (args->for_gc) {
+       NILFS_I(inode)->i_cno = args->cno;
+       NILFS_I(inode)->i_root = args->root;
+       if (args->root && args->ino == NILFS_ROOT_INO)
+               nilfs_get_root(args->root);
+
+       if (args->for_gc)
                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
-               NILFS_I(inode)->i_cno = args->cno;
-               NILFS_I(inode)->i_root = NULL;
-       } else {
-               if (args->root && args->ino == NILFS_ROOT_INO)
-                       nilfs_get_root(args->root);
-               NILFS_I(inode)->i_root = args->root;
-       }
+       if (args->for_btnc)
+               NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
+       if (args->for_shadow)
+               NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
        return 0;
 }
 
@@ -552,7 +572,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -562,7 +583,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -593,7 +615,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+               .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
+               .for_btnc = false, .for_shadow = false
        };
        struct inode *inode;
        int err;
@@ -613,6 +636,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
        return inode;
 }
 
+/**
+ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
+ * @inode: inode object
+ *
+ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
+ * or does nothing if the inode already has it.  This function allocates
+ * an additional inode to maintain page cache of B-tree nodes one-on-one.
+ *
+ * Return Value: On success, 0 is returned. On errors, one of the following
+ * negative error code is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+int nilfs_attach_btree_node_cache(struct inode *inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct inode *btnc_inode;
+       struct nilfs_iget_args args;
+
+       if (ii->i_assoc_inode)
+               return 0;
+
+       args.ino = inode->i_ino;
+       args.root = ii->i_root;
+       args.cno = ii->i_cno;
+       args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
+       args.for_btnc = true;
+       args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+
+       btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                                 nilfs_iget_set, &args);
+       if (unlikely(!btnc_inode))
+               return -ENOMEM;
+       if (btnc_inode->i_state & I_NEW) {
+               nilfs_init_btnc_inode(btnc_inode);
+               unlock_new_inode(btnc_inode);
+       }
+       NILFS_I(btnc_inode)->i_assoc_inode = inode;
+       NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
+       ii->i_assoc_inode = btnc_inode;
+
+       return 0;
+}
+
+/**
+ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
+ * @inode: inode object
+ *
+ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
+ * holder inode bound to @inode, or does nothing if @inode doesn't have it.
+ */
+void nilfs_detach_btree_node_cache(struct inode *inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct inode *btnc_inode = ii->i_assoc_inode;
+
+       if (btnc_inode) {
+               NILFS_I(btnc_inode)->i_assoc_inode = NULL;
+               ii->i_assoc_inode = NULL;
+               iput(btnc_inode);
+       }
+}
+
+/**
+ * nilfs_iget_for_shadow - obtain inode for shadow mapping
+ * @inode: inode object that uses shadow mapping
+ *
+ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
+ * caches for shadow mapping.  The page cache for data pages is set up
+ * in one inode and the one for b-tree node pages is set up in the
+ * other inode, which is attached to the former inode.
+ *
+ * Return Value: On success, a pointer to the inode for data pages is
+ * returned. On errors, one of the following negative error code is returned
+ * in a pointer type.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+struct inode *nilfs_iget_for_shadow(struct inode *inode)
+{
+       struct nilfs_iget_args args = {
+               .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = true
+       };
+       struct inode *s_inode;
+       int err;
+
+       s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                              nilfs_iget_set, &args);
+       if (unlikely(!s_inode))
+               return ERR_PTR(-ENOMEM);
+       if (!(s_inode->i_state & I_NEW))
+               return inode;
+
+       NILFS_I(s_inode)->i_flags = 0;
+       memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+       mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+
+       err = nilfs_attach_btree_node_cache(s_inode);
+       if (unlikely(err)) {
+               iget_failed(s_inode);
+               return ERR_PTR(err);
+       }
+       unlock_new_inode(s_inode);
+       return s_inode;
+}
+
 void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
 {
@@ -760,7 +890,8 @@ static void nilfs_clear_inode(struct inode *inode)
        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);
 
-       nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+       if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+               nilfs_detach_btree_node_cache(inode);
 
        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
index 78db33d..d29a0f2 100644 (file)
@@ -471,9 +471,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
 void nilfs_mdt_clear(struct inode *inode)
 {
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+       struct nilfs_shadow_map *shadow = mdi->mi_shadow;
 
        if (mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);
+
+       if (shadow) {
+               struct inode *s_inode = shadow->inode;
+
+               shadow->inode = NULL;
+               iput(s_inode);
+               mdi->mi_shadow = NULL;
+       }
 }
 
 /**
@@ -507,12 +516,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
                               struct nilfs_shadow_map *shadow)
 {
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+       struct inode *s_inode;
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
-       address_space_init_once(&shadow->frozen_data);
-       nilfs_mapping_init(&shadow->frozen_data, inode);
-       address_space_init_once(&shadow->frozen_btnodes);
-       nilfs_mapping_init(&shadow->frozen_btnodes, inode);
+
+       s_inode = nilfs_iget_for_shadow(inode);
+       if (IS_ERR(s_inode))
+               return PTR_ERR(s_inode);
+
+       shadow->inode = s_inode;
        mi->mi_shadow = shadow;
        return 0;
 }
@@ -526,14 +538,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;
+       struct inode *s_inode = shadow->inode;
        int ret;
 
-       ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+       ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
        if (ret)
                goto out;
 
-       ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
-                                    &ii->i_btnode_cache);
+       ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
+                                    ii->i_assoc_inode->i_mapping);
        if (ret)
                goto out;
 
@@ -549,7 +562,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
        struct page *page;
        int blkbits = inode->i_blkbits;
 
-       page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+       page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
        if (!page)
                return -ENOMEM;
 
@@ -581,7 +594,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
        struct page *page;
        int n;
 
-       page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+       page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
        if (page) {
                if (page_has_buffers(page)) {
                        n = bh_offset(bh) >> inode->i_blkbits;
@@ -622,10 +635,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
                nilfs_palloc_clear_cache(inode);
 
        nilfs_clear_dirty_pages(inode->i_mapping, true);
-       nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+       nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
 
-       nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
-       nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+       nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+       nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
+                             NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
 
        nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
 
@@ -640,10 +654,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
 {
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;
+       struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
 
        down_write(&mi->mi_sem);
        nilfs_release_frozen_buffers(shadow);
-       truncate_inode_pages(&shadow->frozen_data, 0);
-       truncate_inode_pages(&shadow->frozen_btnodes, 0);
+       truncate_inode_pages(shadow->inode->i_mapping, 0);
+       truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
        up_write(&mi->mi_sem);
 }
index 8f86080..9e23bab 100644 (file)
 /**
  * struct nilfs_shadow_map - shadow mapping of meta data file
  * @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
  * @frozen_buffers: list of frozen buffers
  */
 struct nilfs_shadow_map {
        struct nilfs_bmap_store bmap_store;
-       struct address_space frozen_data;
-       struct address_space frozen_btnodes;
+       struct inode *inode;
        struct list_head frozen_buffers;
 };
 
index a7b8175..1344f7d 100644 (file)
@@ -28,7 +28,7 @@
  * @i_xattr: <TODO>
  * @i_dir_start_lookup: page index of last successful search
  * @i_cno: checkpoint number for GC inode
- * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
  * @i_dirty: list for connecting dirty files
  * @xattr_sem: semaphore for extended attributes processing
  * @i_bh: buffer contains disk inode
@@ -43,7 +43,7 @@ struct nilfs_inode_info {
        __u64 i_xattr;  /* sector_t ??? */
        __u32 i_dir_start_lookup;
        __u64 i_cno;            /* check point number for GC inode */
-       struct address_space i_btnode_cache;
+       struct inode *i_assoc_inode;
        struct list_head i_dirty;       /* List for connecting dirty files */
 
 #ifdef CONFIG_NILFS_XATTR
@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
        return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
 }
 
-static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
-{
-       struct nilfs_inode_info *ii =
-               container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
-       return &ii->vfs_inode;
-}
-
 /*
  * Dynamic state flags of NILFS on-memory inode (i_state)
  */
@@ -98,6 +91,8 @@ enum {
        NILFS_I_INODE_SYNC,             /* dsync is not allowed for inode */
        NILFS_I_BMAP,                   /* has bmap and btnode_cache */
        NILFS_I_GCINODE,                /* inode for GC, on memory only */
+       NILFS_I_BTNC,                   /* inode for btree node cache */
+       NILFS_I_SHADOW,                 /* inode for shadowed page cache */
 };
 
 /*
@@ -267,6 +262,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino);
 extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
                                       unsigned long ino, __u64 cno);
+int nilfs_attach_btree_node_cache(struct inode *inode);
+void nilfs_detach_btree_node_cache(struct inode *inode);
+struct inode *nilfs_iget_for_shadow(struct inode *inode);
 extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
 extern void nilfs_truncate(struct inode *);
 extern void nilfs_evict_inode(struct inode *);
index 063dd16..a8e88cc 100644 (file)
@@ -436,22 +436,12 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }
 
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
-{
-       mapping->host = inode;
-       mapping->flags = 0;
-       mapping_set_gfp_mask(mapping, GFP_NOFS);
-       mapping->private_data = NULL;
-       mapping->a_ops = &empty_aops;
-}
-
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
  *
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- *    page dirty flags when it copies back pages from the shadow cache
- *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- *    (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ *    flag of pages when it copies back pages from shadow cache to the
+ *    original cache.
  *
  * 2) Some B-tree operations like insertion or deletion may dispose buffers
  *    in dirty state, and this needs to cancel the dirty state of their pages.
index 569263b..21ddcdd 100644 (file)
@@ -43,7 +43,6 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_page(struct page *, bool);
 void nilfs_clear_dirty_pages(struct address_space *, bool);
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
 unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
                                            unsigned int);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
index 85a8533..0afe083 100644 (file)
@@ -733,15 +733,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                            struct list_head *listp)
 {
        struct nilfs_inode_info *ii = NILFS_I(inode);
-       struct address_space *mapping = &ii->i_btnode_cache;
+       struct inode *btnc_inode = ii->i_assoc_inode;
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        unsigned int i;
        pgoff_t index = 0;
 
+       if (!btnc_inode)
+               return;
+
        pagevec_init(&pvec);
 
-       while (pagevec_lookup_tag(&pvec, mapping, &index,
+       while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
                                        PAGECACHE_TAG_DIRTY)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
@@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                        continue;
                list_del_init(&ii->i_dirty);
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+               nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
 }
index 3e05c98..ba108f9 100644 (file)
@@ -157,7 +157,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
        ii->i_bh = NULL;
        ii->i_state = 0;
        ii->i_cno = 0;
-       nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
+       ii->i_assoc_inode = NULL;
+       ii->i_bmap = &ii->i_bmap_data;
        return &ii->vfs_inode;
 }
 
@@ -1377,8 +1378,6 @@ static void nilfs_inode_init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
        init_rwsem(&ii->xattr_sem);
 #endif
-       address_space_init_once(&ii->i_btnode_cache);
-       ii->i_bmap = &ii->i_bmap_data;
        inode_init_once(&ii->vfs_inode);
 }
 
index 273f65e..0b6f551 100644 (file)
@@ -337,7 +337,6 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
 /* Read information header from global quota file */
 int ocfs2_global_read_info(struct super_block *sb, int type)
 {
-       struct inode *gqinode = NULL;
        unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                              GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
@@ -346,29 +345,31 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
        u64 pcount;
        int status;
 
+       oinfo->dqi_gi.dqi_sb = sb;
+       oinfo->dqi_gi.dqi_type = type;
+       ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
+       oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
+       oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
+       oinfo->dqi_gqi_bh = NULL;
+       oinfo->dqi_gqi_count = 0;
+
        /* Read global header */
-       gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
+       oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                        OCFS2_INVALID_SLOT);
-       if (!gqinode) {
+       if (!oinfo->dqi_gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                        type);
                status = -EINVAL;
                goto out_err;
        }
-       oinfo->dqi_gi.dqi_sb = sb;
-       oinfo->dqi_gi.dqi_type = type;
-       oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
-       oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
-       oinfo->dqi_gqi_bh = NULL;
-       oinfo->dqi_gqi_count = 0;
-       oinfo->dqi_gqinode = gqinode;
+
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
 
-       status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
+       status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
                                             &pcount, NULL);
        if (status < 0)
                goto out_unlock;
index 0e4b16d..b1a8b04 100644 (file)
@@ -702,8 +702,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
        info->dqi_priv = oinfo;
        oinfo->dqi_type = type;
        INIT_LIST_HEAD(&oinfo->dqi_chunk);
-       oinfo->dqi_gqinode = NULL;
-       ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_rec = NULL;
        oinfo->dqi_lqi_bh = NULL;
        oinfo->dqi_libh = NULL;
index dbe72f6..8615188 100644 (file)
@@ -349,20 +349,97 @@ out_budg:
        return err;
 }
 
-static int do_tmpfile(struct inode *dir, struct dentry *dentry,
-                     umode_t mode, struct inode **whiteout)
+static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
+{
+       int err;
+       umode_t mode = S_IFCHR | WHITEOUT_MODE;
+       struct inode *inode;
+       struct ubifs_info *c = dir->i_sb->s_fs_info;
+       struct fscrypt_name nm;
+
+       /*
+        * Create an inode('nlink = 1') for whiteout without updating journal,
+        * let ubifs_jnl_rename() store it on flash to complete rename whiteout
+        * atomically.
+        */
+
+       dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+               dentry, mode, dir->i_ino);
+
+       err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+       if (err)
+               return ERR_PTR(err);
+
+       inode = ubifs_new_inode(c, dir, mode);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+               goto out_free;
+       }
+
+       init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
+       ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
+
+       err = ubifs_init_security(dir, inode, &dentry->d_name);
+       if (err)
+               goto out_inode;
+
+       /* The dir size is updated by do_rename. */
+       insert_inode_hash(inode);
+
+       return inode;
+
+out_inode:
+       make_bad_inode(inode);
+       iput(inode);
+out_free:
+       fscrypt_free_filename(&nm);
+       ubifs_err(c, "cannot create whiteout file, error %d", err);
+       return ERR_PTR(err);
+}
+
+/**
+ * lock_2_inodes - a wrapper for locking two UBIFS inodes.
+ * @inode1: first inode
+ * @inode2: second inode
+ *
+ * We do not implement any tricks to guarantee strict lock ordering, because
+ * VFS has already done it for us on the @i_mutex. So this is just a simple
+ * wrapper function.
+ */
+static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
+{
+       mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
+       mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
+}
+
+/**
+ * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
+ * @inode1: first inode
+ * @inode2: second inode
+ */
+static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
+{
+       mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
+       mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
+}
+
+static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+                        struct dentry *dentry, umode_t mode)
 {
        struct inode *inode;
        struct ubifs_info *c = dir->i_sb->s_fs_info;
-       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1};
+       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+                                       .dirtied_ino = 1};
        struct ubifs_budget_req ino_req = { .dirtied_ino = 1 };
-       struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir);
+       struct ubifs_inode *ui;
        int err, instantiated = 0;
        struct fscrypt_name nm;
 
        /*
-        * Budget request settings: new dirty inode, new direntry,
-        * budget for dirtied inode will be released via writeback.
+        * Budget request settings: new inode, new direntry, changing the
+        * parent directory inode.
+        * Allocate budget separately for new dirtied inode, the budget will
+        * be released via writeback.
         */
 
        dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
@@ -392,42 +469,30 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
        }
        ui = ubifs_inode(inode);
 
-       if (whiteout) {
-               init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
-               ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
-       }
-
        err = ubifs_init_security(dir, inode, &dentry->d_name);
        if (err)
                goto out_inode;
 
        mutex_lock(&ui->ui_mutex);
        insert_inode_hash(inode);
-
-       if (whiteout) {
-               mark_inode_dirty(inode);
-               drop_nlink(inode);
-               *whiteout = inode;
-       } else {
-               d_tmpfile(dentry, inode);
-       }
+       d_tmpfile(dentry, inode);
        ubifs_assert(c, ui->dirty);
 
        instantiated = 1;
        mutex_unlock(&ui->ui_mutex);
 
-       mutex_lock(&dir_ui->ui_mutex);
+       lock_2_inodes(dir, inode);
        err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
        if (err)
                goto out_cancel;
-       mutex_unlock(&dir_ui->ui_mutex);
+       unlock_2_inodes(dir, inode);
 
        ubifs_release_budget(c, &req);
 
        return 0;
 
 out_cancel:
-       mutex_unlock(&dir_ui->ui_mutex);
+       unlock_2_inodes(dir, inode);
 out_inode:
        make_bad_inode(inode);
        if (!instantiated)
@@ -441,12 +506,6 @@ out_budg:
        return err;
 }
 
-static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
-                        struct dentry *dentry, umode_t mode)
-{
-       return do_tmpfile(dir, dentry, mode, NULL);
-}
-
 /**
  * vfs_dent_type - get VFS directory entry type.
  * @type: UBIFS directory entry type
@@ -660,32 +719,6 @@ static int ubifs_dir_release(struct inode *dir, struct file *file)
        return 0;
 }
 
-/**
- * lock_2_inodes - a wrapper for locking two UBIFS inodes.
- * @inode1: first inode
- * @inode2: second inode
- *
- * We do not implement any tricks to guarantee strict lock ordering, because
- * VFS has already done it for us on the @i_mutex. So this is just a simple
- * wrapper function.
- */
-static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
-{
-       mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
-       mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
-}
-
-/**
- * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
- * @inode1: first inode
- * @inode2: second inode
- */
-static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
-{
-       mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
-       mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
-}
-
 static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
                      struct dentry *dentry)
 {
@@ -949,7 +982,8 @@ static int ubifs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
        struct ubifs_inode *dir_ui = ubifs_inode(dir);
        struct ubifs_info *c = dir->i_sb->s_fs_info;
        int err, sz_change;
-       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 };
+       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+                                       .dirtied_ino = 1};
        struct fscrypt_name nm;
 
        /*
@@ -1264,17 +1298,19 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                                        .dirtied_ino = 3 };
        struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
+       struct ubifs_budget_req wht_req;
        struct timespec64 time;
        unsigned int saved_nlink;
        struct fscrypt_name old_nm, new_nm;
 
        /*
-        * Budget request settings: deletion direntry, new direntry, removing
-        * the old inode, and changing old and new parent directory inodes.
+        * Budget request settings:
+        *   req: deletion direntry, new direntry, removing the old inode,
+        *   and changing old and new parent directory inodes.
+        *
+        *   wht_req: new whiteout inode for RENAME_WHITEOUT.
         *
-        * However, this operation also marks the target inode as dirty and
-        * does not write it, so we allocate budget for the target inode
-        * separately.
+        *   ino_req: marks the target inode as dirty and does not write it.
         */
 
        dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x",
@@ -1331,20 +1367,44 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto out_release;
                }
 
-               err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout);
-               if (err) {
+               /*
+                * The whiteout inode without dentry is pinned in memory,
+                * umount won't happen during rename process because we
+                * got parent dentry.
+                */
+               whiteout = create_whiteout(old_dir, old_dentry);
+               if (IS_ERR(whiteout)) {
+                       err = PTR_ERR(whiteout);
                        kfree(dev);
                        goto out_release;
                }
 
-               spin_lock(&whiteout->i_lock);
-               whiteout->i_state |= I_LINKABLE;
-               spin_unlock(&whiteout->i_lock);
-
                whiteout_ui = ubifs_inode(whiteout);
                whiteout_ui->data = dev;
                whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
                ubifs_assert(c, !whiteout_ui->dirty);
+
+               memset(&wht_req, 0, sizeof(struct ubifs_budget_req));
+               wht_req.new_ino = 1;
+               wht_req.new_ino_d = ALIGN(whiteout_ui->data_len, 8);
+               /*
+                * To avoid deadlock between space budget (holds ui_mutex and
+                * waits wb work) and writeback work(waits ui_mutex), do space
+                * budget before ubifs inodes locked.
+                */
+               err = ubifs_budget_space(c, &wht_req);
+               if (err) {
+                       /*
+                        * Whiteout inode can not be written on flash by
+                        * ubifs_jnl_write_inode(), because it's neither
+                        * dirty nor zero-nlink.
+                        */
+                       iput(whiteout);
+                       goto out_release;
+               }
+
+               /* Add the old_dentry size to the old_dir size. */
+               old_sz -= CALC_DENT_SIZE(fname_len(&old_nm));
        }
 
        lock_4_inodes(old_dir, new_dir, new_inode, whiteout);
@@ -1416,29 +1476,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
                if (unlink && IS_SYNC(new_inode))
                        sync = 1;
-       }
-
-       if (whiteout) {
-               struct ubifs_budget_req wht_req = { .dirtied_ino = 1,
-                               .dirtied_ino_d = \
-                               ALIGN(ubifs_inode(whiteout)->data_len, 8) };
-
-               err = ubifs_budget_space(c, &wht_req);
-               if (err) {
-                       kfree(whiteout_ui->data);
-                       whiteout_ui->data_len = 0;
-                       iput(whiteout);
-                       goto out_release;
-               }
-
-               inc_nlink(whiteout);
-               mark_inode_dirty(whiteout);
-
-               spin_lock(&whiteout->i_lock);
-               whiteout->i_state &= ~I_LINKABLE;
-               spin_unlock(&whiteout->i_lock);
-
-               iput(whiteout);
+               /*
+                * S_SYNC flag of whiteout inherits from the old_dir, and we
+                * have already checked the old dir inode. So there is no need
+                * to check whiteout.
+                */
        }
 
        err = ubifs_jnl_rename(c, old_dir, old_inode, &old_nm, new_dir,
@@ -1449,6 +1491,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
        ubifs_release_budget(c, &req);
 
+       if (whiteout) {
+               ubifs_release_budget(c, &wht_req);
+               iput(whiteout);
+       }
+
        mutex_lock(&old_inode_ui->ui_mutex);
        release = old_inode_ui->dirty;
        mark_inode_dirty_sync(old_inode);
@@ -1457,11 +1504,16 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (release)
                ubifs_release_budget(c, &ino_req);
        if (IS_SYNC(old_inode))
-               err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
+               /*
+                * Rename finished here. Although old inode cannot be updated
+                * on flash, old ctime is not a big problem, don't return err
+                * code to userspace.
+                */
+               old_inode->i_sb->s_op->write_inode(old_inode, NULL);
 
        fscrypt_free_filename(&old_nm);
        fscrypt_free_filename(&new_nm);
-       return err;
+       return 0;
 
 out_cancel:
        if (unlink) {
@@ -1482,11 +1534,11 @@ out_cancel:
                                inc_nlink(old_dir);
                }
        }
+       unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
        if (whiteout) {
-               drop_nlink(whiteout);
+               ubifs_release_budget(c, &wht_req);
                iput(whiteout);
        }
-       unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
 out_release:
        ubifs_release_budget(c, &ino_req);
        ubifs_release_budget(c, &req);
index 8a9ffc2..0383fbd 100644 (file)
@@ -570,7 +570,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
        }
 
        if (!PagePrivate(page)) {
-               SetPagePrivate(page);
+               attach_page_private(page, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }
@@ -947,7 +947,7 @@ static int do_writepage(struct page *page, int len)
                release_existing_page_budget(c);
 
        atomic_long_dec(&c->dirty_pg_cnt);
-       ClearPagePrivate(page);
+       detach_page_private(page);
        ClearPageChecked(page);
 
        kunmap(page);
@@ -1304,7 +1304,7 @@ static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
                release_existing_page_budget(c);
 
        atomic_long_dec(&c->dirty_pg_cnt);
-       folio_clear_private(folio);
+       folio_detach_private(folio);
        folio_clear_checked(folio);
 }
 
@@ -1471,8 +1471,8 @@ static int ubifs_migrate_page(struct address_space *mapping,
                return rc;
 
        if (PagePrivate(page)) {
-               ClearPagePrivate(page);
-               SetPagePrivate(newpage);
+               detach_page_private(page);
+               attach_page_private(newpage, (void *)1);
        }
 
        if (mode != MIGRATE_SYNC_NO_COPY)
@@ -1496,7 +1496,7 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
                return 0;
        ubifs_assert(c, PagePrivate(page));
        ubifs_assert(c, 0);
-       ClearPagePrivate(page);
+       detach_page_private(page);
        ClearPageChecked(page);
        return 1;
 }
@@ -1567,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
        else {
                if (!PageChecked(page))
                        ubifs_convert_page_budget(c);
-               SetPagePrivate(page);
+               attach_page_private(page, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }
index 789a781..1607a3c 100644 (file)
@@ -854,16 +854,42 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
         */
        n = aligned_len >> c->max_write_shift;
        if (n) {
-               n <<= c->max_write_shift;
+               int m = n - 1;
+
                dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
                       wbuf->offs);
-               err = ubifs_leb_write(c, wbuf->lnum, buf + written,
-                                     wbuf->offs, n);
+
+               if (m) {
+                       /* '(n-1)<<c->max_write_shift < len' is always true. */
+                       m <<= c->max_write_shift;
+                       err = ubifs_leb_write(c, wbuf->lnum, buf + written,
+                                             wbuf->offs, m);
+                       if (err)
+                               goto out;
+                       wbuf->offs += m;
+                       aligned_len -= m;
+                       len -= m;
+                       written += m;
+               }
+
+               /*
+                * The non-written len of buf may be less than 'n' because
+                * parameter 'len' is not 8 bytes aligned, so here we read
+                * min(len, n) bytes from buf.
+                */
+               n = 1 << c->max_write_shift;
+               memcpy(wbuf->buf, buf + written, min(len, n));
+               if (n > len) {
+                       ubifs_assert(c, n - len < 8);
+                       ubifs_pad(c, wbuf->buf + len, n - len);
+               }
+
+               err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
                if (err)
                        goto out;
                wbuf->offs += n;
                aligned_len -= n;
-               len -= n;
+               len -= min(len, n);
                written += n;
        }
 
index c6a8634..71bcebe 100644 (file)
@@ -108,7 +108,7 @@ static int setflags(struct inode *inode, int flags)
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_budget_req req = { .dirtied_ino = 1,
-                                       .dirtied_ino_d = ui->data_len };
+                       .dirtied_ino_d = ALIGN(ui->data_len, 8) };
 
        err = ubifs_budget_space(c, &req);
        if (err)
index 8ea680d..75dab0a 100644 (file)
@@ -1207,9 +1207,9 @@ out_free:
  * @sync: non-zero if the write-buffer has to be synchronized
  *
  * This function implements the re-name operation which may involve writing up
- * to 4 inodes and 2 directory entries. It marks the written inodes as clean
- * and returns zero on success. In case of failure, a negative error code is
- * returned.
+ * to 4 inodes(new inode, whiteout inode, old and new parent directory inodes)
+ * and 2 directory entries. It marks the written inodes as clean and returns
+ * zero on success. In case of failure, a negative error code is returned.
  */
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                     const struct inode *old_inode,
@@ -1222,14 +1222,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        void *p;
        union ubifs_key key;
        struct ubifs_dent_node *dent, *dent2;
-       int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
+       int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0;
        int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
        int last_reference = !!(new_inode && new_inode->i_nlink == 0);
        int move = (old_dir != new_dir);
-       struct ubifs_inode *new_ui;
+       struct ubifs_inode *new_ui, *whiteout_ui;
        u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
+       u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ];
        u8 hash_dent1[UBIFS_HASH_ARR_SZ];
        u8 hash_dent2[UBIFS_HASH_ARR_SZ];
 
@@ -1249,9 +1250,20 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        } else
                ilen = 0;
 
+       if (whiteout) {
+               whiteout_ui = ubifs_inode(whiteout);
+               ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex));
+               ubifs_assert(c, whiteout->i_nlink == 1);
+               ubifs_assert(c, !whiteout_ui->dirty);
+               wlen = UBIFS_INO_NODE_SZ;
+               wlen += whiteout_ui->data_len;
+       } else
+               wlen = 0;
+
        aligned_dlen1 = ALIGN(dlen1, 8);
        aligned_dlen2 = ALIGN(dlen2, 8);
-       len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
+       len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) +
+             ALIGN(wlen, 8) + ALIGN(plen, 8);
        if (move)
                len += plen;
 
@@ -1313,6 +1325,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                p += ALIGN(ilen, 8);
        }
 
+       if (whiteout) {
+               pack_inode(c, p, whiteout, 0);
+               err = ubifs_node_calc_hash(c, p, hash_whiteout_inode);
+               if (err)
+                       goto out_release;
+
+               p += ALIGN(wlen, 8);
+       }
+
        if (!move) {
                pack_inode(c, p, old_dir, 1);
                err = ubifs_node_calc_hash(c, p, hash_old_dir);
@@ -1352,6 +1373,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                if (new_inode)
                        ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
                                                  new_inode->i_ino);
+               if (whiteout)
+                       ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
+                                                 whiteout->i_ino);
        }
        release_head(c, BASEHD);
 
@@ -1368,8 +1392,6 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
                if (err)
                        goto out_ro;
-
-               ubifs_delete_orphan(c, whiteout->i_ino);
        } else {
                err = ubifs_add_dirt(c, lnum, dlen2);
                if (err)
@@ -1390,6 +1412,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                offs += ALIGN(ilen, 8);
        }
 
+       if (whiteout) {
+               ino_key_init(c, &key, whiteout->i_ino);
+               err = ubifs_tnc_add(c, &key, lnum, offs, wlen,
+                                   hash_whiteout_inode);
+               if (err)
+                       goto out_ro;
+               offs += ALIGN(wlen, 8);
+       }
+
        ino_key_init(c, &key, old_dir->i_ino);
        err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
        if (err)
@@ -1410,6 +1441,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                new_ui->synced_i_size = new_ui->ui_size;
                spin_unlock(&new_ui->ui_lock);
        }
+       /*
+        * No need to mark whiteout inode clean.
+        * Whiteout doesn't have non-zero size, no need to update
+        * synced_i_size for whiteout_ui.
+        */
        mark_inode_clean(c, ubifs_inode(old_dir));
        if (move)
                mark_inode_clean(c, ubifs_inode(new_dir));
index f55828c..008fa46 100644 (file)
@@ -381,7 +381,7 @@ struct ubifs_gced_idx_leb {
  * @ui_mutex exists for two main reasons. At first it prevents inodes from
  * being written back while UBIFS changing them, being in the middle of an VFS
  * operation. This way UBIFS makes sure the inode fields are consistent. For
- * example, in 'ubifs_rename()' we change 3 inodes simultaneously, and
+ * example, in 'ubifs_rename()' we change 4 inodes simultaneously, and
  * write-back must not write any of them before we have finished.
  *
  * The second reason is budgeting - UBIFS has to budget all operations. If an
index 0cc8742..0e51c00 100644 (file)
@@ -33,7 +33,7 @@ $(obj)/utf8data.c: $(obj)/mkutf8data $(filter %.txt, $(cmd_utf8data)) FORCE
 else
 
 $(obj)/utf8data.c: $(src)/utf8data.c_shipped FORCE
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 
 endif
 
index 8949515..dfbad5c 100644 (file)
@@ -8,99 +8,6 @@
 #define AM3_CLKCTRL_OFFSET     0x0
 #define AM3_CLKCTRL_INDEX(offset)      ((offset) - AM3_CLKCTRL_OFFSET)
 
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_per clocks */
-#define AM3_L4_PER_CLKCTRL_OFFSET      0x14
-#define AM3_L4_PER_CLKCTRL_INDEX(offset)       ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
-#define AM3_CPGMAC0_CLKCTRL    AM3_L4_PER_CLKCTRL_INDEX(0x14)
-#define AM3_LCDC_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x18)
-#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c)
-#define AM3_TPTC0_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x24)
-#define AM3_EMIF_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x28)
-#define AM3_OCMCRAM_CLKCTRL    AM3_L4_PER_CLKCTRL_INDEX(0x2c)
-#define AM3_GPMC_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x30)
-#define AM3_MCASP0_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0x34)
-#define AM3_UART6_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x38)
-#define AM3_MMC1_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x3c)
-#define AM3_ELM_CLKCTRL        AM3_L4_PER_CLKCTRL_INDEX(0x40)
-#define AM3_I2C3_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x44)
-#define AM3_I2C2_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x48)
-#define AM3_SPI0_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x4c)
-#define AM3_SPI1_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0x50)
-#define AM3_L4_LS_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x60)
-#define AM3_MCASP1_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0x68)
-#define AM3_UART2_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x6c)
-#define AM3_UART3_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x70)
-#define AM3_UART4_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x74)
-#define AM3_UART5_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x78)
-#define AM3_TIMER7_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0x7c)
-#define AM3_TIMER2_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0x80)
-#define AM3_TIMER3_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0x84)
-#define AM3_TIMER4_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0x88)
-#define AM3_RNG_CLKCTRL        AM3_L4_PER_CLKCTRL_INDEX(0x90)
-#define AM3_AES_CLKCTRL        AM3_L4_PER_CLKCTRL_INDEX(0x94)
-#define AM3_SHAM_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0xa0)
-#define AM3_GPIO2_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0xac)
-#define AM3_GPIO3_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0xb0)
-#define AM3_GPIO4_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0xb4)
-#define AM3_TPCC_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0xbc)
-#define AM3_D_CAN0_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0xc0)
-#define AM3_D_CAN1_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0xc4)
-#define AM3_EPWMSS1_CLKCTRL    AM3_L4_PER_CLKCTRL_INDEX(0xcc)
-#define AM3_EPWMSS0_CLKCTRL    AM3_L4_PER_CLKCTRL_INDEX(0xd4)
-#define AM3_EPWMSS2_CLKCTRL    AM3_L4_PER_CLKCTRL_INDEX(0xd8)
-#define AM3_L3_INSTR_CLKCTRL   AM3_L4_PER_CLKCTRL_INDEX(0xdc)
-#define AM3_L3_MAIN_CLKCTRL    AM3_L4_PER_CLKCTRL_INDEX(0xe0)
-#define AM3_PRUSS_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0xe8)
-#define AM3_TIMER5_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0xec)
-#define AM3_TIMER6_CLKCTRL     AM3_L4_PER_CLKCTRL_INDEX(0xf0)
-#define AM3_MMC2_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0xf4)
-#define AM3_MMC3_CLKCTRL       AM3_L4_PER_CLKCTRL_INDEX(0xf8)
-#define AM3_TPTC1_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0xfc)
-#define AM3_TPTC2_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x100)
-#define AM3_SPINLOCK_CLKCTRL   AM3_L4_PER_CLKCTRL_INDEX(0x10c)
-#define AM3_MAILBOX_CLKCTRL    AM3_L4_PER_CLKCTRL_INDEX(0x110)
-#define AM3_L4_HS_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x120)
-#define AM3_OCPWP_CLKCTRL      AM3_L4_PER_CLKCTRL_INDEX(0x130)
-#define AM3_CLKDIV32K_CLKCTRL  AM3_L4_PER_CLKCTRL_INDEX(0x14c)
-
-/* l4_wkup clocks */
-#define AM3_L4_WKUP_CLKCTRL_OFFSET     0x4
-#define AM3_L4_WKUP_CLKCTRL_INDEX(offset)      ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET)
-#define AM3_CONTROL_CLKCTRL    AM3_L4_WKUP_CLKCTRL_INDEX(0x4)
-#define AM3_GPIO1_CLKCTRL      AM3_L4_WKUP_CLKCTRL_INDEX(0x8)
-#define AM3_L4_WKUP_CLKCTRL    AM3_L4_WKUP_CLKCTRL_INDEX(0xc)
-#define AM3_DEBUGSS_CLKCTRL    AM3_L4_WKUP_CLKCTRL_INDEX(0x14)
-#define AM3_WKUP_M3_CLKCTRL    AM3_L4_WKUP_CLKCTRL_INDEX(0xb0)
-#define AM3_UART1_CLKCTRL      AM3_L4_WKUP_CLKCTRL_INDEX(0xb4)
-#define AM3_I2C1_CLKCTRL       AM3_L4_WKUP_CLKCTRL_INDEX(0xb8)
-#define AM3_ADC_TSC_CLKCTRL    AM3_L4_WKUP_CLKCTRL_INDEX(0xbc)
-#define AM3_SMARTREFLEX0_CLKCTRL       AM3_L4_WKUP_CLKCTRL_INDEX(0xc0)
-#define AM3_TIMER1_CLKCTRL     AM3_L4_WKUP_CLKCTRL_INDEX(0xc4)
-#define AM3_SMARTREFLEX1_CLKCTRL       AM3_L4_WKUP_CLKCTRL_INDEX(0xc8)
-#define AM3_WD_TIMER2_CLKCTRL  AM3_L4_WKUP_CLKCTRL_INDEX(0xd4)
-
-/* mpu clocks */
-#define AM3_MPU_CLKCTRL_OFFSET 0x4
-#define AM3_MPU_CLKCTRL_INDEX(offset)  ((offset) - AM3_MPU_CLKCTRL_OFFSET)
-#define AM3_MPU_CLKCTRL        AM3_MPU_CLKCTRL_INDEX(0x4)
-
-/* l4_rtc clocks */
-#define AM3_RTC_CLKCTRL        AM3_CLKCTRL_INDEX(0x0)
-
-/* gfx_l3 clocks */
-#define AM3_GFX_L3_CLKCTRL_OFFSET      0x4
-#define AM3_GFX_L3_CLKCTRL_INDEX(offset)       ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET)
-#define AM3_GFX_CLKCTRL        AM3_GFX_L3_CLKCTRL_INDEX(0x4)
-
-/* l4_cefuse clocks */
-#define AM3_L4_CEFUSE_CLKCTRL_OFFSET   0x20
-#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset)    ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
-#define AM3_CEFUSE_CLKCTRL     AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
-
-/* XXX: Compatibility part end */
-
 /* l4ls clocks */
 #define AM3_L4LS_CLKCTRL_OFFSET        0x38
 #define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET)
index 4be6c59..a65b082 100644 (file)
@@ -8,104 +8,6 @@
 #define AM4_CLKCTRL_OFFSET     0x20
 #define AM4_CLKCTRL_INDEX(offset)      ((offset) - AM4_CLKCTRL_OFFSET)
 
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_wkup clocks */
-#define AM4_ADC_TSC_CLKCTRL    AM4_CLKCTRL_INDEX(0x120)
-#define AM4_L4_WKUP_CLKCTRL    AM4_CLKCTRL_INDEX(0x220)
-#define AM4_WKUP_M3_CLKCTRL    AM4_CLKCTRL_INDEX(0x228)
-#define AM4_COUNTER_32K_CLKCTRL        AM4_CLKCTRL_INDEX(0x230)
-#define AM4_TIMER1_CLKCTRL     AM4_CLKCTRL_INDEX(0x328)
-#define AM4_WD_TIMER2_CLKCTRL  AM4_CLKCTRL_INDEX(0x338)
-#define AM4_I2C1_CLKCTRL       AM4_CLKCTRL_INDEX(0x340)
-#define AM4_UART1_CLKCTRL      AM4_CLKCTRL_INDEX(0x348)
-#define AM4_SMARTREFLEX0_CLKCTRL       AM4_CLKCTRL_INDEX(0x350)
-#define AM4_SMARTREFLEX1_CLKCTRL       AM4_CLKCTRL_INDEX(0x358)
-#define AM4_CONTROL_CLKCTRL    AM4_CLKCTRL_INDEX(0x360)
-#define AM4_GPIO1_CLKCTRL      AM4_CLKCTRL_INDEX(0x368)
-
-/* mpu clocks */
-#define AM4_MPU_CLKCTRL        AM4_CLKCTRL_INDEX(0x20)
-
-/* gfx_l3 clocks */
-#define AM4_GFX_CLKCTRL        AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_rtc clocks */
-#define AM4_RTC_CLKCTRL        AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_per clocks */
-#define AM4_L3_MAIN_CLKCTRL    AM4_CLKCTRL_INDEX(0x20)
-#define AM4_AES_CLKCTRL        AM4_CLKCTRL_INDEX(0x28)
-#define AM4_DES_CLKCTRL        AM4_CLKCTRL_INDEX(0x30)
-#define AM4_L3_INSTR_CLKCTRL   AM4_CLKCTRL_INDEX(0x40)
-#define AM4_OCMCRAM_CLKCTRL    AM4_CLKCTRL_INDEX(0x50)
-#define AM4_SHAM_CLKCTRL       AM4_CLKCTRL_INDEX(0x58)
-#define AM4_VPFE0_CLKCTRL      AM4_CLKCTRL_INDEX(0x68)
-#define AM4_VPFE1_CLKCTRL      AM4_CLKCTRL_INDEX(0x70)
-#define AM4_TPCC_CLKCTRL       AM4_CLKCTRL_INDEX(0x78)
-#define AM4_TPTC0_CLKCTRL      AM4_CLKCTRL_INDEX(0x80)
-#define AM4_TPTC1_CLKCTRL      AM4_CLKCTRL_INDEX(0x88)
-#define AM4_TPTC2_CLKCTRL      AM4_CLKCTRL_INDEX(0x90)
-#define AM4_L4_HS_CLKCTRL      AM4_CLKCTRL_INDEX(0xa0)
-#define AM4_GPMC_CLKCTRL       AM4_CLKCTRL_INDEX(0x220)
-#define AM4_MCASP0_CLKCTRL     AM4_CLKCTRL_INDEX(0x238)
-#define AM4_MCASP1_CLKCTRL     AM4_CLKCTRL_INDEX(0x240)
-#define AM4_MMC3_CLKCTRL       AM4_CLKCTRL_INDEX(0x248)
-#define AM4_QSPI_CLKCTRL       AM4_CLKCTRL_INDEX(0x258)
-#define AM4_USB_OTG_SS0_CLKCTRL        AM4_CLKCTRL_INDEX(0x260)
-#define AM4_USB_OTG_SS1_CLKCTRL        AM4_CLKCTRL_INDEX(0x268)
-#define AM4_PRUSS_CLKCTRL      AM4_CLKCTRL_INDEX(0x320)
-#define AM4_L4_LS_CLKCTRL      AM4_CLKCTRL_INDEX(0x420)
-#define AM4_D_CAN0_CLKCTRL     AM4_CLKCTRL_INDEX(0x428)
-#define AM4_D_CAN1_CLKCTRL     AM4_CLKCTRL_INDEX(0x430)
-#define AM4_EPWMSS0_CLKCTRL    AM4_CLKCTRL_INDEX(0x438)
-#define AM4_EPWMSS1_CLKCTRL    AM4_CLKCTRL_INDEX(0x440)
-#define AM4_EPWMSS2_CLKCTRL    AM4_CLKCTRL_INDEX(0x448)
-#define AM4_EPWMSS3_CLKCTRL    AM4_CLKCTRL_INDEX(0x450)
-#define AM4_EPWMSS4_CLKCTRL    AM4_CLKCTRL_INDEX(0x458)
-#define AM4_EPWMSS5_CLKCTRL    AM4_CLKCTRL_INDEX(0x460)
-#define AM4_ELM_CLKCTRL        AM4_CLKCTRL_INDEX(0x468)
-#define AM4_GPIO2_CLKCTRL      AM4_CLKCTRL_INDEX(0x478)
-#define AM4_GPIO3_CLKCTRL      AM4_CLKCTRL_INDEX(0x480)
-#define AM4_GPIO4_CLKCTRL      AM4_CLKCTRL_INDEX(0x488)
-#define AM4_GPIO5_CLKCTRL      AM4_CLKCTRL_INDEX(0x490)
-#define AM4_GPIO6_CLKCTRL      AM4_CLKCTRL_INDEX(0x498)
-#define AM4_HDQ1W_CLKCTRL      AM4_CLKCTRL_INDEX(0x4a0)
-#define AM4_I2C2_CLKCTRL       AM4_CLKCTRL_INDEX(0x4a8)
-#define AM4_I2C3_CLKCTRL       AM4_CLKCTRL_INDEX(0x4b0)
-#define AM4_MAILBOX_CLKCTRL    AM4_CLKCTRL_INDEX(0x4b8)
-#define AM4_MMC1_CLKCTRL       AM4_CLKCTRL_INDEX(0x4c0)
-#define AM4_MMC2_CLKCTRL       AM4_CLKCTRL_INDEX(0x4c8)
-#define AM4_RNG_CLKCTRL        AM4_CLKCTRL_INDEX(0x4e0)
-#define AM4_SPI0_CLKCTRL       AM4_CLKCTRL_INDEX(0x500)
-#define AM4_SPI1_CLKCTRL       AM4_CLKCTRL_INDEX(0x508)
-#define AM4_SPI2_CLKCTRL       AM4_CLKCTRL_INDEX(0x510)
-#define AM4_SPI3_CLKCTRL       AM4_CLKCTRL_INDEX(0x518)
-#define AM4_SPI4_CLKCTRL       AM4_CLKCTRL_INDEX(0x520)
-#define AM4_SPINLOCK_CLKCTRL   AM4_CLKCTRL_INDEX(0x528)
-#define AM4_TIMER2_CLKCTRL     AM4_CLKCTRL_INDEX(0x530)
-#define AM4_TIMER3_CLKCTRL     AM4_CLKCTRL_INDEX(0x538)
-#define AM4_TIMER4_CLKCTRL     AM4_CLKCTRL_INDEX(0x540)
-#define AM4_TIMER5_CLKCTRL     AM4_CLKCTRL_INDEX(0x548)
-#define AM4_TIMER6_CLKCTRL     AM4_CLKCTRL_INDEX(0x550)
-#define AM4_TIMER7_CLKCTRL     AM4_CLKCTRL_INDEX(0x558)
-#define AM4_TIMER8_CLKCTRL     AM4_CLKCTRL_INDEX(0x560)
-#define AM4_TIMER9_CLKCTRL     AM4_CLKCTRL_INDEX(0x568)
-#define AM4_TIMER10_CLKCTRL    AM4_CLKCTRL_INDEX(0x570)
-#define AM4_TIMER11_CLKCTRL    AM4_CLKCTRL_INDEX(0x578)
-#define AM4_UART2_CLKCTRL      AM4_CLKCTRL_INDEX(0x580)
-#define AM4_UART3_CLKCTRL      AM4_CLKCTRL_INDEX(0x588)
-#define AM4_UART4_CLKCTRL      AM4_CLKCTRL_INDEX(0x590)
-#define AM4_UART5_CLKCTRL      AM4_CLKCTRL_INDEX(0x598)
-#define AM4_UART6_CLKCTRL      AM4_CLKCTRL_INDEX(0x5a0)
-#define AM4_OCP2SCP0_CLKCTRL   AM4_CLKCTRL_INDEX(0x5b8)
-#define AM4_OCP2SCP1_CLKCTRL   AM4_CLKCTRL_INDEX(0x5c0)
-#define AM4_EMIF_CLKCTRL       AM4_CLKCTRL_INDEX(0x720)
-#define AM4_DSS_CORE_CLKCTRL   AM4_CLKCTRL_INDEX(0xa20)
-#define AM4_CPGMAC0_CLKCTRL    AM4_CLKCTRL_INDEX(0xb20)
-
-/* XXX: Compatibility part end. */
-
 /* l3s_tsc clocks */
 #define AM4_L3S_TSC_CLKCTRL_OFFSET     0x120
 #define AM4_L3S_TSC_CLKCTRL_INDEX(offset)      ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET)
index 8498c0c..3e3972a 100644 (file)
@@ -24,6 +24,7 @@
 #define PMC_PLLACK             7
 #define PMC_PLLBCK             8
 #define PMC_AUDIOPLLCK         9
+#define PMC_AUDIOPINCK         10
 
 /* SAMA7G5 */
 #define PMC_CPUPLL             (PMC_MAIN + 1)
diff --git a/include/dt-bindings/clock/cirrus,cs2000-cp.h b/include/dt-bindings/clock/cirrus,cs2000-cp.h
new file mode 100644 (file)
index 0000000..fe3ac71
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Daniel Mack
+ */
+
+#ifndef __DT_BINDINGS_CS2000CP_CLK_H
+#define __DT_BINDINGS_CS2000CP_CLK_H
+
+#define CS2000CP_AUX_OUTPUT_REF_CLK    0
+#define CS2000CP_AUX_OUTPUT_CLK_IN     1
+#define CS2000CP_AUX_OUTPUT_CLK_OUT    2
+#define CS2000CP_AUX_OUTPUT_PLL_LOCK   3
+
+#endif /* __DT_BINDINGS_CS2000CP_CLK_H */
index 29ff6b8..8a903c7 100644 (file)
@@ -8,174 +8,6 @@
 #define DRA7_CLKCTRL_OFFSET    0x20
 #define DRA7_CLKCTRL_INDEX(offset)     ((offset) - DRA7_CLKCTRL_OFFSET)
 
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* mpu clocks */
-#define DRA7_MPU_CLKCTRL       DRA7_CLKCTRL_INDEX(0x20)
-
-/* ipu clocks */
-#define _DRA7_IPU_CLKCTRL_OFFSET       0x40
-#define _DRA7_IPU_CLKCTRL_INDEX(offset)        ((offset) - _DRA7_IPU_CLKCTRL_OFFSET)
-#define DRA7_MCASP1_CLKCTRL    _DRA7_IPU_CLKCTRL_INDEX(0x50)
-#define DRA7_TIMER5_CLKCTRL    _DRA7_IPU_CLKCTRL_INDEX(0x58)
-#define DRA7_TIMER6_CLKCTRL    _DRA7_IPU_CLKCTRL_INDEX(0x60)
-#define DRA7_TIMER7_CLKCTRL    _DRA7_IPU_CLKCTRL_INDEX(0x68)
-#define DRA7_TIMER8_CLKCTRL    _DRA7_IPU_CLKCTRL_INDEX(0x70)
-#define DRA7_I2C5_CLKCTRL      _DRA7_IPU_CLKCTRL_INDEX(0x78)
-#define DRA7_UART6_CLKCTRL     _DRA7_IPU_CLKCTRL_INDEX(0x80)
-
-/* rtc clocks */
-#define DRA7_RTC_CLKCTRL_OFFSET        0x40
-#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
-#define DRA7_RTCSS_CLKCTRL     DRA7_RTC_CLKCTRL_INDEX(0x44)
-
-/* vip clocks */
-#define DRA7_VIP1_CLKCTRL      DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_VIP2_CLKCTRL      DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_VIP3_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
-
-/* vpe clocks */
-#define DRA7_VPE_CLKCTRL_OFFSET        0x60
-#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
-#define DRA7_VPE_CLKCTRL       DRA7_VPE_CLKCTRL_INDEX(0x64)
-
-/* coreaon clocks */
-#define DRA7_SMARTREFLEX_MPU_CLKCTRL   DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_SMARTREFLEX_CORE_CLKCTRL  DRA7_CLKCTRL_INDEX(0x38)
-
-/* l3main1 clocks */
-#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_GPMC_CLKCTRL      DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_TPCC_CLKCTRL      DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_TPTC0_CLKCTRL     DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_TPTC1_CLKCTRL     DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_VCP1_CLKCTRL      DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_VCP2_CLKCTRL      DRA7_CLKCTRL_INDEX(0x90)
-
-/* dma clocks */
-#define DRA7_DMA_SYSTEM_CLKCTRL        DRA7_CLKCTRL_INDEX(0x20)
-
-/* emif clocks */
-#define DRA7_DMM_CLKCTRL       DRA7_CLKCTRL_INDEX(0x20)
-
-/* atl clocks */
-#define DRA7_ATL_CLKCTRL_OFFSET        0x0
-#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
-#define DRA7_ATL_CLKCTRL       DRA7_ATL_CLKCTRL_INDEX(0x0)
-
-/* l4cfg clocks */
-#define DRA7_L4_CFG_CLKCTRL    DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SPINLOCK_CLKCTRL  DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MAILBOX1_CLKCTRL  DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_MAILBOX2_CLKCTRL  DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_MAILBOX3_CLKCTRL  DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_MAILBOX4_CLKCTRL  DRA7_CLKCTRL_INDEX(0x58)
-#define DRA7_MAILBOX5_CLKCTRL  DRA7_CLKCTRL_INDEX(0x60)
-#define DRA7_MAILBOX6_CLKCTRL  DRA7_CLKCTRL_INDEX(0x68)
-#define DRA7_MAILBOX7_CLKCTRL  DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_MAILBOX8_CLKCTRL  DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_MAILBOX9_CLKCTRL  DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
-#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
-#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
-
-/* l3instr clocks */
-#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_L3_INSTR_CLKCTRL  DRA7_CLKCTRL_INDEX(0x28)
-
-/* dss clocks */
-#define DRA7_DSS_CORE_CLKCTRL  DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_BB2D_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
-
-/* l3init clocks */
-#define DRA7_MMC1_CLKCTRL      DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MMC2_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_USB_OTG_SS2_CLKCTRL       DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_USB_OTG_SS3_CLKCTRL       DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_USB_OTG_SS4_CLKCTRL       DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_SATA_CLKCTRL      DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_PCIE1_CLKCTRL     DRA7_CLKCTRL_INDEX(0xb0)
-#define DRA7_PCIE2_CLKCTRL     DRA7_CLKCTRL_INDEX(0xb8)
-#define DRA7_GMAC_CLKCTRL      DRA7_CLKCTRL_INDEX(0xd0)
-#define DRA7_OCP2SCP1_CLKCTRL  DRA7_CLKCTRL_INDEX(0xe0)
-#define DRA7_OCP2SCP3_CLKCTRL  DRA7_CLKCTRL_INDEX(0xe8)
-#define DRA7_USB_OTG_SS1_CLKCTRL       DRA7_CLKCTRL_INDEX(0xf0)
-
-/* l4per clocks */
-#define _DRA7_L4PER_CLKCTRL_OFFSET     0x0
-#define _DRA7_L4PER_CLKCTRL_INDEX(offset)      ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET)
-#define DRA7_L4_PER2_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0xc)
-#define DRA7_L4_PER3_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0x14)
-#define DRA7_TIMER10_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0x28)
-#define DRA7_TIMER11_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0x30)
-#define DRA7_TIMER2_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER3_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER4_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x48)
-#define DRA7_TIMER9_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x50)
-#define DRA7_ELM_CLKCTRL       _DRA7_L4PER_CLKCTRL_INDEX(0x58)
-#define DRA7_GPIO2_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x60)
-#define DRA7_GPIO3_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x68)
-#define DRA7_GPIO4_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x70)
-#define DRA7_GPIO5_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x78)
-#define DRA7_GPIO6_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x80)
-#define DRA7_HDQ1W_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x88)
-#define DRA7_EPWMSS1_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0x90)
-#define DRA7_EPWMSS2_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0x98)
-#define DRA7_I2C1_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0xa0)
-#define DRA7_I2C2_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0xa8)
-#define DRA7_I2C3_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0xb0)
-#define DRA7_I2C4_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0xb8)
-#define DRA7_L4_PER1_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0xc0)
-#define DRA7_EPWMSS0_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0xc4)
-#define DRA7_TIMER13_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0xc8)
-#define DRA7_TIMER14_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0xd0)
-#define DRA7_TIMER15_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0xd8)
-#define DRA7_MCSPI1_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0xf0)
-#define DRA7_MCSPI2_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0xf8)
-#define DRA7_MCSPI3_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x100)
-#define DRA7_MCSPI4_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x108)
-#define DRA7_GPIO7_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x110)
-#define DRA7_GPIO8_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x118)
-#define DRA7_MMC3_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0x120)
-#define DRA7_MMC4_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0x128)
-#define DRA7_TIMER16_CLKCTRL   _DRA7_L4PER_CLKCTRL_INDEX(0x130)
-#define DRA7_QSPI_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0x138)
-#define DRA7_UART1_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x140)
-#define DRA7_UART2_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x148)
-#define DRA7_UART3_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x150)
-#define DRA7_UART4_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x158)
-#define DRA7_MCASP2_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x160)
-#define DRA7_MCASP3_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x168)
-#define DRA7_UART5_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x170)
-#define DRA7_MCASP5_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x178)
-#define DRA7_MCASP8_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x190)
-#define DRA7_MCASP4_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x198)
-#define DRA7_AES1_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
-#define DRA7_AES2_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
-#define DRA7_DES_CLKCTRL       _DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
-#define DRA7_RNG_CLKCTRL       _DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
-#define DRA7_SHAM_CLKCTRL      _DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
-#define DRA7_UART7_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
-#define DRA7_UART8_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
-#define DRA7_UART9_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
-#define DRA7_DCAN2_CLKCTRL     _DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
-#define DRA7_MCASP6_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x204)
-#define DRA7_MCASP7_CLKCTRL    _DRA7_L4PER_CLKCTRL_INDEX(0x208)
-
-/* wkupaon clocks */
-#define DRA7_L4_WKUP_CLKCTRL   DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_GPIO1_CLKCTRL     DRA7_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER1_CLKCTRL    DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER12_CLKCTRL   DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_COUNTER_32K_CLKCTRL       DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_UART10_CLKCTRL    DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_DCAN1_CLKCTRL     DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_ADC_CLKCTRL       DRA7_CLKCTRL_INDEX(0xa0)
-
-/* XXX: Compatibility part end. */
-
 /* mpu clocks */
 #define DRA7_MPU_MPU_CLKCTRL   DRA7_CLKCTRL_INDEX(0x20)
 
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
new file mode 100644 (file)
index 0000000..21fda9c
--- /dev/null
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX93_CLK_H
+#define __DT_BINDINGS_CLOCK_IMX93_CLK_H
+
+#define IMX93_CLK_DUMMY                        0
+#define IMX93_CLK_24M                  1
+#define IMX93_CLK_EXT1                 2
+#define IMX93_CLK_SYS_PLL_PFD0         3
+#define IMX93_CLK_SYS_PLL_PFD0_DIV2    4
+#define IMX93_CLK_SYS_PLL_PFD1         5
+#define IMX93_CLK_SYS_PLL_PFD1_DIV2    6
+#define IMX93_CLK_SYS_PLL_PFD2         7
+#define IMX93_CLK_SYS_PLL_PFD2_DIV2    8
+#define IMX93_CLK_AUDIO_PLL            9
+#define IMX93_CLK_VIDEO_PLL            10
+#define IMX93_CLK_A55_PERIPH           11
+#define IMX93_CLK_A55_MTR_BUS          12
+#define IMX93_CLK_A55                  13
+#define IMX93_CLK_M33                  14
+#define IMX93_CLK_BUS_WAKEUP           15
+#define IMX93_CLK_BUS_AON              16
+#define IMX93_CLK_WAKEUP_AXI           17
+#define IMX93_CLK_SWO_TRACE            18
+#define IMX93_CLK_M33_SYSTICK          19
+#define IMX93_CLK_FLEXIO1              20
+#define IMX93_CLK_FLEXIO2              21
+#define IMX93_CLK_LPIT1                        22
+#define IMX93_CLK_LPIT2                        23
+#define IMX93_CLK_LPTMR1               24
+#define IMX93_CLK_LPTMR2               25
+#define IMX93_CLK_TPM1                 26
+#define IMX93_CLK_TPM2                 27
+#define IMX93_CLK_TPM3                 28
+#define IMX93_CLK_TPM4                 29
+#define IMX93_CLK_TPM5                 30
+#define IMX93_CLK_TPM6                 31
+#define IMX93_CLK_FLEXSPI1             32
+#define IMX93_CLK_CAN1                 33
+#define IMX93_CLK_CAN2                 34
+#define IMX93_CLK_LPUART1              35
+#define IMX93_CLK_LPUART2              36
+#define IMX93_CLK_LPUART3              37
+#define IMX93_CLK_LPUART4              38
+#define IMX93_CLK_LPUART5              39
+#define IMX93_CLK_LPUART6              40
+#define IMX93_CLK_LPUART7              41
+#define IMX93_CLK_LPUART8              42
+#define IMX93_CLK_LPI2C1               43
+#define IMX93_CLK_LPI2C2               44
+#define IMX93_CLK_LPI2C3               45
+#define IMX93_CLK_LPI2C4               46
+#define IMX93_CLK_LPI2C5               47
+#define IMX93_CLK_LPI2C6               48
+#define IMX93_CLK_LPI2C7               49
+#define IMX93_CLK_LPI2C8               50
+#define IMX93_CLK_LPSPI1               51
+#define IMX93_CLK_LPSPI2               52
+#define IMX93_CLK_LPSPI3               53
+#define IMX93_CLK_LPSPI4               54
+#define IMX93_CLK_LPSPI5               55
+#define IMX93_CLK_LPSPI6               56
+#define IMX93_CLK_LPSPI7               57
+#define IMX93_CLK_LPSPI8               58
+#define IMX93_CLK_I3C1                 59
+#define IMX93_CLK_I3C2                 60
+#define IMX93_CLK_USDHC1               61
+#define IMX93_CLK_USDHC2               62
+#define IMX93_CLK_USDHC3               63
+#define IMX93_CLK_SAI1                 64
+#define IMX93_CLK_SAI2                 65
+#define IMX93_CLK_SAI3                 66
+#define IMX93_CLK_CCM_CKO1             67
+#define IMX93_CLK_CCM_CKO2             68
+#define IMX93_CLK_CCM_CKO3             69
+#define IMX93_CLK_CCM_CKO4             70
+#define IMX93_CLK_HSIO                 71
+#define IMX93_CLK_HSIO_USB_TEST_60M    72
+#define IMX93_CLK_HSIO_ACSCAN_80M      73
+#define IMX93_CLK_HSIO_ACSCAN_480M     74
+#define IMX93_CLK_ML_APB               75
+#define IMX93_CLK_ML                   76
+#define IMX93_CLK_MEDIA_AXI            77
+#define IMX93_CLK_MEDIA_APB            78
+#define IMX93_CLK_MEDIA_LDB            79
+#define IMX93_CLK_MEDIA_DISP_PIX       80
+#define IMX93_CLK_CAM_PIX              81
+#define IMX93_CLK_MIPI_TEST_BYTE       82
+#define IMX93_CLK_MIPI_PHY_CFG         83
+#define IMX93_CLK_ADC                  84
+#define IMX93_CLK_PDM                  85
+#define IMX93_CLK_TSTMR1               86
+#define IMX93_CLK_TSTMR2               87
+#define IMX93_CLK_MQS1                 88
+#define IMX93_CLK_MQS2                 89
+#define IMX93_CLK_AUDIO_XCVR           90
+#define IMX93_CLK_SPDIF                        91
+#define IMX93_CLK_ENET                 92
+#define IMX93_CLK_ENET_TIMER1          93
+#define IMX93_CLK_ENET_TIMER2          94
+#define IMX93_CLK_ENET_REF             95
+#define IMX93_CLK_ENET_REF_PHY         96
+#define IMX93_CLK_I3C1_SLOW            97
+#define IMX93_CLK_I3C2_SLOW            98
+#define IMX93_CLK_USB_PHY_BURUNIN      99
+#define IMX93_CLK_PAL_CAME_SCAN                100
+#define IMX93_CLK_A55_GATE             101
+#define IMX93_CLK_CM33_GATE            102
+#define IMX93_CLK_ADC1_GATE            103
+#define IMX93_CLK_WDOG1_GATE           104
+#define IMX93_CLK_WDOG2_GATE           105
+#define IMX93_CLK_WDOG3_GATE           106
+#define IMX93_CLK_WDOG4_GATE           107
+#define IMX93_CLK_WDOG5_GATE           108
+#define IMX93_CLK_SEMA1_GATE           109
+#define IMX93_CLK_SEMA2_GATE           110
+#define IMX93_CLK_MU_A_GATE            111
+#define IMX93_CLK_MU_B_GATE            112
+#define IMX93_CLK_EDMA1_GATE           113
+#define IMX93_CLK_EDMA2_GATE           114
+#define IMX93_CLK_FLEXSPI1_GATE                115
+#define IMX93_CLK_GPIO1_GATE           116
+#define IMX93_CLK_GPIO2_GATE           117
+#define IMX93_CLK_GPIO3_GATE           118
+#define IMX93_CLK_GPIO4_GATE           119
+#define IMX93_CLK_FLEXIO1_GATE         120
+#define IMX93_CLK_FLEXIO2_GATE         121
+#define IMX93_CLK_LPIT1_GATE           122
+#define IMX93_CLK_LPIT2_GATE           123
+#define IMX93_CLK_LPTMR1_GATE          124
+#define IMX93_CLK_LPTMR2_GATE          125
+#define IMX93_CLK_TPM1_GATE            126
+#define IMX93_CLK_TPM2_GATE            127
+#define IMX93_CLK_TPM3_GATE            128
+#define IMX93_CLK_TPM4_GATE            129
+#define IMX93_CLK_TPM5_GATE            130
+#define IMX93_CLK_TPM6_GATE            131
+#define IMX93_CLK_CAN1_GATE            132
+#define IMX93_CLK_CAN2_GATE            133
+#define IMX93_CLK_LPUART1_GATE         134
+#define IMX93_CLK_LPUART2_GATE         135
+#define IMX93_CLK_LPUART3_GATE         136
+#define IMX93_CLK_LPUART4_GATE         137
+#define IMX93_CLK_LPUART5_GATE         138
+#define IMX93_CLK_LPUART6_GATE         139
+#define IMX93_CLK_LPUART7_GATE         140
+#define IMX93_CLK_LPUART8_GATE         141
+#define IMX93_CLK_LPI2C1_GATE          142
+#define IMX93_CLK_LPI2C2_GATE          143
+#define IMX93_CLK_LPI2C3_GATE          144
+#define IMX93_CLK_LPI2C4_GATE          145
+#define IMX93_CLK_LPI2C5_GATE          146
+#define IMX93_CLK_LPI2C6_GATE          147
+#define IMX93_CLK_LPI2C7_GATE          148
+#define IMX93_CLK_LPI2C8_GATE          149
+#define IMX93_CLK_LPSPI1_GATE          150
+#define IMX93_CLK_LPSPI2_GATE          151
+#define IMX93_CLK_LPSPI3_GATE          152
+#define IMX93_CLK_LPSPI4_GATE          153
+#define IMX93_CLK_LPSPI5_GATE          154
+#define IMX93_CLK_LPSPI6_GATE          155
+#define IMX93_CLK_LPSPI7_GATE          156
+#define IMX93_CLK_LPSPI8_GATE          157
+#define IMX93_CLK_I3C1_GATE            158
+#define IMX93_CLK_I3C2_GATE            159
+#define IMX93_CLK_USDHC1_GATE          160
+#define IMX93_CLK_USDHC2_GATE          161
+#define IMX93_CLK_USDHC3_GATE          162
+#define IMX93_CLK_SAI1_GATE            163
+#define IMX93_CLK_SAI2_GATE            164
+#define IMX93_CLK_SAI3_GATE            165
+#define IMX93_CLK_MIPI_CSI_GATE                166
+#define IMX93_CLK_MIPI_DSI_GATE                167
+#define IMX93_CLK_LVDS_GATE            168
+#define IMX93_CLK_LCDIF_GATE           169
+#define IMX93_CLK_PXP_GATE             170
+#define IMX93_CLK_ISI_GATE             171
+#define IMX93_CLK_NIC_MEDIA_GATE       172
+#define IMX93_CLK_USB_CONTROLLER_GATE  173
+#define IMX93_CLK_USB_TEST_60M_GATE    174
+#define IMX93_CLK_HSIO_TROUT_24M_GATE  175
+#define IMX93_CLK_PDM_GATE             176
+#define IMX93_CLK_MQS1_GATE            177
+#define IMX93_CLK_MQS2_GATE            178
+#define IMX93_CLK_AUD_XCVR_GATE                179
+#define IMX93_CLK_SPDIF_GATE           180
+#define IMX93_CLK_HSIO_32K_GATE                181
+#define IMX93_CLK_ENET1_GATE           182
+#define IMX93_CLK_ENET_QOS_GATE                183
+#define IMX93_CLK_SYS_CNT_GATE         184
+#define IMX93_CLK_TSTMR1_GATE          185
+#define IMX93_CLK_TSTMR2_GATE          186
+#define IMX93_CLK_TMC_GATE             187
+#define IMX93_CLK_PMRO_GATE            188
+#define IMX93_CLK_32K                  189
+#define IMX93_CLK_END                  190
+
+#endif
diff --git a/include/dt-bindings/clock/imxrt1050-clock.h b/include/dt-bindings/clock/imxrt1050-clock.h
new file mode 100644 (file)
index 0000000..93bef08
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright(C) 2019
+ * Author(s): Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMXRT1050_H
+#define __DT_BINDINGS_CLOCK_IMXRT1050_H
+
+#define IMXRT1050_CLK_DUMMY                    0
+#define IMXRT1050_CLK_CKIL                     1
+#define IMXRT1050_CLK_CKIH                     2
+#define IMXRT1050_CLK_OSC                      3
+#define IMXRT1050_CLK_PLL2_PFD0_352M           4
+#define IMXRT1050_CLK_PLL2_PFD1_594M           5
+#define IMXRT1050_CLK_PLL2_PFD2_396M           6
+#define IMXRT1050_CLK_PLL3_PFD0_720M           7
+#define IMXRT1050_CLK_PLL3_PFD1_664_62M                8
+#define IMXRT1050_CLK_PLL3_PFD2_508_24M                9
+#define IMXRT1050_CLK_PLL3_PFD3_454_74M                10
+#define IMXRT1050_CLK_PLL2_198M                        11
+#define IMXRT1050_CLK_PLL3_120M                        12
+#define IMXRT1050_CLK_PLL3_80M                 13
+#define IMXRT1050_CLK_PLL3_60M                 14
+#define IMXRT1050_CLK_PLL1_BYPASS              15
+#define IMXRT1050_CLK_PLL2_BYPASS              16
+#define IMXRT1050_CLK_PLL3_BYPASS              17
+#define IMXRT1050_CLK_PLL5_BYPASS              19
+#define IMXRT1050_CLK_PLL1_REF_SEL             20
+#define IMXRT1050_CLK_PLL2_REF_SEL             21
+#define IMXRT1050_CLK_PLL3_REF_SEL             22
+#define IMXRT1050_CLK_PLL5_REF_SEL             23
+#define IMXRT1050_CLK_PRE_PERIPH_SEL           24
+#define IMXRT1050_CLK_PERIPH_SEL               25
+#define IMXRT1050_CLK_SEMC_ALT_SEL             26
+#define IMXRT1050_CLK_SEMC_SEL                 27
+#define IMXRT1050_CLK_USDHC1_SEL               28
+#define IMXRT1050_CLK_USDHC2_SEL               29
+#define IMXRT1050_CLK_LPUART_SEL               30
+#define IMXRT1050_CLK_LCDIF_SEL                        31
+#define IMXRT1050_CLK_VIDEO_POST_DIV_SEL       32
+#define IMXRT1050_CLK_VIDEO_DIV                        33
+#define IMXRT1050_CLK_ARM_PODF                 34
+#define IMXRT1050_CLK_LPUART_PODF              35
+#define IMXRT1050_CLK_USDHC1_PODF              36
+#define IMXRT1050_CLK_USDHC2_PODF              37
+#define IMXRT1050_CLK_SEMC_PODF                        38
+#define IMXRT1050_CLK_AHB_PODF                 39
+#define IMXRT1050_CLK_LCDIF_PRED               40
+#define IMXRT1050_CLK_LCDIF_PODF               41
+#define IMXRT1050_CLK_USDHC1                   42
+#define IMXRT1050_CLK_USDHC2                   43
+#define IMXRT1050_CLK_LPUART1                  44
+#define IMXRT1050_CLK_SEMC                     45
+#define IMXRT1050_CLK_LCDIF_APB                        46
+#define IMXRT1050_CLK_PLL1_ARM                 47
+#define IMXRT1050_CLK_PLL2_SYS                 48
+#define IMXRT1050_CLK_PLL3_USB_OTG             49
+#define IMXRT1050_CLK_PLL4_AUDIO               50
+#define IMXRT1050_CLK_PLL5_VIDEO               51
+#define IMXRT1050_CLK_PLL6_ENET                        52
+#define IMXRT1050_CLK_PLL7_USB_HOST            53
+#define IMXRT1050_CLK_LCDIF_PIX                        54
+#define IMXRT1050_CLK_USBOH3                   55
+#define IMXRT1050_CLK_IPG_PDOF                 56
+#define IMXRT1050_CLK_PER_CLK_SEL              57
+#define IMXRT1050_CLK_PER_PDOF                 58
+#define IMXRT1050_CLK_DMA                      59
+#define IMXRT1050_CLK_DMA_MUX                  60
+#define IMXRT1050_CLK_END                      61
+
+#endif /* __DT_BINDINGS_CLOCK_IMXRT1050_H */
diff --git a/include/dt-bindings/clock/qcom,dispcc-qcm2290.h b/include/dt-bindings/clock/qcom,dispcc-qcm2290.h
new file mode 100644 (file)
index 0000000..1db513d
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0                           0
+#define DISP_CC_MDSS_AHB_CLK                   1
+#define DISP_CC_MDSS_AHB_CLK_SRC               2
+#define DISP_CC_MDSS_BYTE0_CLK                 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC             4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC         5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK            6
+#define DISP_CC_MDSS_ESC0_CLK                  7
+#define DISP_CC_MDSS_ESC0_CLK_SRC              8
+#define DISP_CC_MDSS_MDP_CLK                   9
+#define DISP_CC_MDSS_MDP_CLK_SRC               10
+#define DISP_CC_MDSS_MDP_LUT_CLK               11
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK          12
+#define DISP_CC_MDSS_PCLK0_CLK                 13
+#define DISP_CC_MDSS_PCLK0_CLK_SRC             14
+#define DISP_CC_MDSS_VSYNC_CLK                 15
+#define DISP_CC_MDSS_VSYNC_CLK_SRC             16
+#define DISP_CC_SLEEP_CLK                      17
+#define DISP_CC_SLEEP_CLK_SRC                  18
+#define DISP_CC_XO_CLK                         19
+#define DISP_CC_XO_CLK_SRC                     20
+
+#define MDSS_GDSC                              0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6125.h b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
new file mode 100644 (file)
index 0000000..4ff974f
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+
+#define DISP_CC_PLL0                   0
+#define DISP_CC_MDSS_AHB_CLK           1
+#define DISP_CC_MDSS_AHB_CLK_SRC       2
+#define DISP_CC_MDSS_BYTE0_CLK         3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC     4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK    5
+#define DISP_CC_MDSS_DP_AUX_CLK                6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC    7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK     8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK       10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC   11
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK  12
+#define DISP_CC_MDSS_DP_PIXEL_CLK      13
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC  14
+#define DISP_CC_MDSS_ESC0_CLK          15
+#define DISP_CC_MDSS_ESC0_CLK_SRC      16
+#define DISP_CC_MDSS_MDP_CLK           17
+#define DISP_CC_MDSS_MDP_CLK_SRC       18
+#define DISP_CC_MDSS_MDP_LUT_CLK       19
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK  20
+#define DISP_CC_MDSS_PCLK0_CLK         21
+#define DISP_CC_MDSS_PCLK0_CLK_SRC     22
+#define DISP_CC_MDSS_ROT_CLK           23
+#define DISP_CC_MDSS_ROT_CLK_SRC       24
+#define DISP_CC_MDSS_VSYNC_CLK         25
+#define DISP_CC_MDSS_VSYNC_CLK_SRC     26
+#define DISP_CC_XO_CLK                 27
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC                      0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6350.h b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
new file mode 100644 (file)
index 0000000..cb54aae
--- /dev/null
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0                           0
+#define DISP_CC_MDSS_AHB_CLK                   1
+#define DISP_CC_MDSS_AHB_CLK_SRC               2
+#define DISP_CC_MDSS_BYTE0_CLK                 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC             4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC         5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK            6
+#define DISP_CC_MDSS_DP_AUX_CLK                        7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC            8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK             9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC         10
+#define DISP_CC_MDSS_DP_LINK_CLK               11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC           12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC       13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK          14
+#define DISP_CC_MDSS_DP_PIXEL_CLK              15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC          16
+#define DISP_CC_MDSS_ESC0_CLK                  17
+#define DISP_CC_MDSS_ESC0_CLK_SRC              18
+#define DISP_CC_MDSS_MDP_CLK                   19
+#define DISP_CC_MDSS_MDP_CLK_SRC               20
+#define DISP_CC_MDSS_MDP_LUT_CLK               21
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK          22
+#define DISP_CC_MDSS_PCLK0_CLK                 23
+#define DISP_CC_MDSS_PCLK0_CLK_SRC             24
+#define DISP_CC_MDSS_ROT_CLK                   25
+#define DISP_CC_MDSS_ROT_CLK_SRC               26
+#define DISP_CC_MDSS_RSCC_AHB_CLK              27
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK            28
+#define DISP_CC_MDSS_VSYNC_CLK                 29
+#define DISP_CC_MDSS_VSYNC_CLK_SRC             30
+#define DISP_CC_SLEEP_CLK                      31
+#define DISP_CC_XO_CLK                         32
+
+/* GDSCs */
+#define MDSS_GDSC                              0
+
+#endif
index 7deec14..02262d2 100644 (file)
 #define PLL14                                  232
 #define PLL14_VOTE                             233
 #define PLL18                                  234
-#define CE5_SRC                                        235
+#define CE5_A_CLK                              235
 #define CE5_H_CLK                              236
 #define CE5_CORE_CLK                           237
 #define CE3_SLEEP_CLK                          238
 #define EBI2_AON_CLK                           281
 #define NSSTCM_CLK_SRC                         282
 #define NSSTCM_CLK                             283
+#define CE5_A_CLK_SRC                          285
+#define CE5_H_CLK_SRC                          286
+#define CE5_CORE_CLK_SRC                       287
 
 #endif
index 3e1a918..dfefd5e 100644 (file)
 #define GCC_USB_PHY_CFG_AHB2PHY_BCR                            28
 
 /* GCC GDSCRs */
+#define PCIE_0_GDSC                                            0
+#define PCIE_1_GDSC                                            1
+#define UFS_CARD_GDSC                                          2
+#define UFS_PHY_GDSC                                           3
 #define USB30_PRIM_GDSC                     4
 #define USB30_SEC_GDSC                                         5
+#define EMAC_GDSC                                              6
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm6350.h b/include/dt-bindings/clock/qcom,gpucc-sm6350.h
new file mode 100644 (file)
index 0000000..68e814f
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0                                            0
+#define GPU_CC_PLL1                                            1
+#define GPU_CC_ACD_AHB_CLK                                     2
+#define GPU_CC_ACD_CXO_CLK                                     3
+#define GPU_CC_AHB_CLK                                         4
+#define GPU_CC_CRC_AHB_CLK                                     5
+#define GPU_CC_CX_GFX3D_CLK                                    6
+#define GPU_CC_CX_GFX3D_SLV_CLK                                        7
+#define GPU_CC_CX_GMU_CLK                                      8
+#define GPU_CC_CX_SNOC_DVM_CLK                                 9
+#define GPU_CC_CXO_AON_CLK                                     10
+#define GPU_CC_CXO_CLK                                         11
+#define GPU_CC_GMU_CLK_SRC                                     12
+#define GPU_CC_GX_CXO_CLK                                      13
+#define GPU_CC_GX_GFX3D_CLK                                    14
+#define GPU_CC_GX_GFX3D_CLK_SRC                                        15
+#define GPU_CC_GX_GMU_CLK                                      16
+#define GPU_CC_GX_VSENSE_CLK                                   17
+
+/* CLK_HW */
+#define GPU_CC_CRC_DIV                                         0
+
+/* GDSCs */
+#define GPU_CX_GDSC                                            0
+#define GPU_GX_GDSC                                            1
+
+#endif
index fb624ff..015db95 100644 (file)
 #define RPM_SMD_PKA_A_CLK                      119
 #define RPM_SMD_CPUSS_GNOC_CLK                 120
 #define RPM_SMD_CPUSS_GNOC_A_CLK               121
+#define RPM_SMD_MSS_CFG_AHB_CLK                122
+#define RPM_SMD_MSS_CFG_AHB_A_CLK              123
 
 #endif
index 3b21d05..5af372e 100644 (file)
@@ -10,9 +10,9 @@
 
 /* Clock indexes for use by Device Tree data and the PRCI driver */
 
-#define PRCI_CLK_COREPLL              0
-#define PRCI_CLK_DDRPLL                       1
-#define PRCI_CLK_GEMGXLPLL            2
-#define PRCI_CLK_TLCLK                3
+#define FU540_PRCI_CLK_COREPLL         0
+#define FU540_PRCI_CLK_DDRPLL          1
+#define FU540_PRCI_CLK_GEMGXLPLL       2
+#define FU540_PRCI_CLK_TLCLK           3
 
 #endif
index 7899b7f..672bdad 100644 (file)
 
 /* Clock indexes for use by Device Tree data and the PRCI driver */
 
-#define PRCI_CLK_COREPLL              0
-#define PRCI_CLK_DDRPLL                       1
-#define PRCI_CLK_GEMGXLPLL            2
-#define PRCI_CLK_DVFSCOREPLL          3
-#define PRCI_CLK_HFPCLKPLL            4
-#define PRCI_CLK_CLTXPLL              5
-#define PRCI_CLK_TLCLK                6
-#define PRCI_CLK_PCLK                 7
-#define PRCI_CLK_PCIE_AUX             8
+#define FU740_PRCI_CLK_COREPLL         0
+#define FU740_PRCI_CLK_DDRPLL          1
+#define FU740_PRCI_CLK_GEMGXLPLL       2
+#define FU740_PRCI_CLK_DVFSCOREPLL     3
+#define FU740_PRCI_CLK_HFPCLKPLL       4
+#define FU740_PRCI_CLK_CLTXPLL         5
+#define FU740_PRCI_CLK_TLCLK           6
+#define FU740_PRCI_CLK_PCLK            7
+#define FU740_PRCI_CLK_PCIE_AUX                8
 
 #endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */
diff --git a/include/dt-bindings/clock/starfive-jh7100-audio.h b/include/dt-bindings/clock/starfive-jh7100-audio.h
new file mode 100644 (file)
index 0000000..fbb4eae
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+
+#define JH7100_AUDCLK_ADC_MCLK         0
+#define JH7100_AUDCLK_I2S1_MCLK                1
+#define JH7100_AUDCLK_I2SADC_APB       2
+#define JH7100_AUDCLK_I2SADC_BCLK      3
+#define JH7100_AUDCLK_I2SADC_BCLK_N    4
+#define JH7100_AUDCLK_I2SADC_LRCLK     5
+#define JH7100_AUDCLK_PDM_APB          6
+#define JH7100_AUDCLK_PDM_MCLK         7
+#define JH7100_AUDCLK_I2SVAD_APB       8
+#define JH7100_AUDCLK_SPDIF            9
+#define JH7100_AUDCLK_SPDIF_APB                10
+#define JH7100_AUDCLK_PWMDAC_APB       11
+#define JH7100_AUDCLK_DAC_MCLK         12
+#define JH7100_AUDCLK_I2SDAC_APB       13
+#define JH7100_AUDCLK_I2SDAC_BCLK      14
+#define JH7100_AUDCLK_I2SDAC_BCLK_N    15
+#define JH7100_AUDCLK_I2SDAC_LRCLK     16
+#define JH7100_AUDCLK_I2S1_APB         17
+#define JH7100_AUDCLK_I2S1_BCLK                18
+#define JH7100_AUDCLK_I2S1_BCLK_N      19
+#define JH7100_AUDCLK_I2S1_LRCLK       20
+#define JH7100_AUDCLK_I2SDAC16K_APB    21
+#define JH7100_AUDCLK_APB0_BUS         22
+#define JH7100_AUDCLK_DMA1P_AHB                23
+#define JH7100_AUDCLK_USB_APB          24
+#define JH7100_AUDCLK_USB_LPM          25
+#define JH7100_AUDCLK_USB_STB          26
+#define JH7100_AUDCLK_APB_EN           27
+#define JH7100_AUDCLK_VAD_MEM          28
+
+#define JH7100_AUDCLK_END              29
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__ */
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
new file mode 100644 (file)
index 0000000..c845493
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
+#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
+
+#define CLK_OSC32K             0
+#define CLK_OSC32K_FANOUT      1
+#define CLK_IOSC               2
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */
index 26b6f92..020c9cf 100644 (file)
 #define NSS_CAL_PRBS_RST_N_RESET                       154
 #define NSS_LCKDT_RST_N_RESET                          155
 #define NSS_SRDS_N_RESET                               156
+#define CRYPTO_ENG1_RESET                              157
+#define CRYPTO_ENG2_RESET                              158
+#define CRYPTO_ENG3_RESET                              159
+#define CRYPTO_ENG4_RESET                              160
+#define CRYPTO_AHB_RESET                               161
 
 #endif
index 338aa27..edb7f6d 100644 (file)
@@ -80,12 +80,6 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
 
 #ifdef CONFIG_BALLOON_COMPACTION
 extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
-                               isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
-                               struct page *newpage,
-                               struct page *page, enum migrate_mode mode);
 
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
@@ -155,22 +149,6 @@ static inline void balloon_page_delete(struct page *page)
        list_del(&page->lru);
 }
 
-static inline bool balloon_page_isolate(struct page *page)
-{
-       return false;
-}
-
-static inline void balloon_page_putback(struct page *page)
-{
-       return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
-                               struct page *page, enum migrate_mode mode)
-{
-       return 0;
-}
-
 static inline gfp_t balloon_mapping_gfp_mask(void)
 {
        return GFP_HIGHUSER;
index 2faa6f7..c10dc4c 100644 (file)
@@ -888,7 +888,7 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
 struct clk_mux {
        struct clk_hw   hw;
        void __iomem    *reg;
-       u32             *table;
+       const u32       *table;
        u32             mask;
        u8              shift;
        u8              flags;
@@ -913,18 +913,18 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
                const struct clk_hw **parent_hws,
                const struct clk_parent_data *parent_data,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
-               u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+               u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
 struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
                const char *name, u8 num_parents,
                const char * const *parent_names,
                const struct clk_hw **parent_hws,
                const struct clk_parent_data *parent_data,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
-               u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+               u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
                const char * const *parent_names, u8 num_parents,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
-               u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+               u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
 
 #define clk_register_mux(dev, name, parent_names, num_parents, flags, reg,    \
                         shift, width, clk_mux_flags, lock)                   \
@@ -962,9 +962,9 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
                              (shift), BIT((width)) - 1, (clk_mux_flags),     \
                              NULL, (lock))
 
-int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
                         unsigned int val);
-unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
+unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);
 
 void clk_unregister_mux(struct clk *clk);
 void clk_hw_unregister_mux(struct clk_hw *hw);
@@ -1003,6 +1003,9 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
 struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
                const char *name, const char *parent_name, unsigned long flags,
                unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
+               const char *name, unsigned int index, unsigned long flags,
+               unsigned int mult, unsigned int div);
 /**
  * struct clk_fractional_divider - adjustable fractional divider clock
  *
index 266e8de..39faa54 100644 (file)
@@ -986,6 +986,17 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
        clk_bulk_unprepare(num_clks, clks);
 }
 
+/**
+ * clk_drop_range - Reset any range set on that clock
+ * @clk: clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+static inline int clk_drop_range(struct clk *clk)
+{
+       return clk_set_rate_range(clk, 0, ULONG_MAX);
+}
+
 /**
  * clk_get_optional - lookup and obtain a reference to an optional clock
  *                   producer.
index cf32123..57c8ec4 100644 (file)
@@ -9,4 +9,6 @@
 int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
 int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
 
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
+
 #endif
index 90fd742..a6f6373 100644 (file)
  */
 #ifdef CONFIG_CMA_AREAS
 #define MAX_CMA_AREAS  (1 + CONFIG_CMA_AREAS)
-
-#else
-#define MAX_CMA_AREAS  (0)
-
 #endif
 
 #define CMA_MAX_NAME 64
index d44ff74..6727fb0 100644 (file)
@@ -456,6 +456,20 @@ int fscache_begin_read_operation(struct netfs_cache_resources *cres,
        return -ENOBUFS;
 }
 
+/**
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
+ *
+ * Clean up the resources at the end of the read request.
+ */
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
+{
+       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+       if (ops)
+               ops->end_operation(cres);
+}
+
 /**
  * fscache_read - Start a read from the cache.
  * @cres: The cache resources to use
index 0fa17fb..761f8f1 100644 (file)
@@ -264,9 +264,7 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (24 +                                         \
-                         3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) +        \
-                         IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
index b0728c8..98c9351 100644 (file)
@@ -168,13 +168,16 @@ struct gpio_irq_chip {
 
        /**
         * @parent_handler_data:
+        *
+        * If @per_parent_data is false, @parent_handler_data is a single
+        * pointer used as the data associated with every parent interrupt.
+        *
         * @parent_handler_data_array:
         *
-        * Data associated, and passed to, the handler for the parent
-        * interrupt. Can either be a single pointer if @per_parent_data
-        * is false, or an array of @num_parents pointers otherwise.  If
-        * @per_parent_data is true, @parent_handler_data_array cannot be
-        * NULL.
+        * If @per_parent_data is true, @parent_handler_data_array is
+        * an array of @num_parents pointers, and is used to associate
+        * different data for each parent. This cannot be NULL if
+        * @per_parent_data is true.
         */
        union {
                void *parent_handler_data;
index 9cb39d9..604a126 100644 (file)
@@ -85,7 +85,6 @@ struct i2c_dev_boardinfo {
  */
 struct i2c_dev_desc {
        struct i3c_i2c_dev_desc common;
-       const struct i2c_dev_boardinfo *boardinfo;
        struct i2c_client *dev;
        u16 addr;
        u8 lvr;
index 0354b29..49790c1 100644 (file)
@@ -475,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
 void input_alloc_absinfo(struct input_dev *dev);
 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
                          int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+                   const struct input_dev *src, unsigned int src_axis);
 
 #define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item)                   \
 static inline int input_abs_get_##_suffix(struct input_dev *dev,       \
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644 (file)
index 0000000..7e4b702
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS  24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ *                        codes, encoded rows/columns, etc) for the top
+ *                        row function keys, in an order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is supposed to be used by ChromeOS keyboards using
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+       u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+       unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+                                         char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
index 312ff99..1571687 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ftrace.h>
 #include <linux/refcount.h>
 #include <linux/freelist.h>
+#include <linux/rethook.h>
 #include <asm/kprobes.h>
 
 #ifdef CONFIG_KPROBES
@@ -149,13 +150,20 @@ struct kretprobe {
        int maxactive;
        int nmissed;
        size_t data_size;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+       struct rethook *rh;
+#else
        struct freelist_head freelist;
        struct kretprobe_holder *rph;
+#endif
 };
 
 #define KRETPROBE_MAX_DATA_SIZE        4096
 
 struct kretprobe_instance {
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+       struct rethook_node node;
+#else
        union {
                struct freelist_node freelist;
                struct rcu_head rcu;
@@ -164,6 +172,7 @@ struct kretprobe_instance {
        struct kretprobe_holder *rph;
        kprobe_opcode_t *ret_addr;
        void *fp;
+#endif
        char data[];
 };
 
@@ -186,10 +195,24 @@ extern void kprobe_busy_begin(void);
 extern void kprobe_busy_end(void);
 
 #ifdef CONFIG_KRETPROBES
-extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
-                                  struct pt_regs *regs);
+/* Check whether @p is used for implementing a trampoline. */
 extern int arch_trampoline_kprobe(struct kprobe *p);
 
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+       RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+               "Kretprobe is accessed from instance under preemptive context");
+
+       return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
+}
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+       return ri->node.ret_addr;
+}
+#else
+extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+                                  struct pt_regs *regs);
 void arch_kretprobe_fixup_return(struct pt_regs *regs,
                                 kprobe_opcode_t *correct_ret_addr);
 
@@ -232,6 +255,12 @@ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance
        return READ_ONCE(ri->rph->rp);
 }
 
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+       return (unsigned long)ri->ret_addr;
+}
+#endif /* CONFIG_KRETPROBE_ON_RETHOOK */
+
 #else /* !CONFIG_KRETPROBES */
 static inline void arch_prepare_kretprobe(struct kretprobe *rp,
                                        struct pt_regs *regs)
@@ -395,7 +424,11 @@ void unregister_kretprobe(struct kretprobe *rp);
 int register_kretprobes(struct kretprobe **rps, int num);
 void unregister_kretprobes(struct kretprobe **rps, int num);
 
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+#define kprobe_flush_task(tk)  do {} while (0)
+#else
 void kprobe_flush_task(struct task_struct *tk);
+#endif
 
 void kprobe_free_init_mem(void);
 
@@ -509,6 +542,19 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
 #endif /* !CONFIG_OPTPROBES */
 
 #ifdef CONFIG_KRETPROBES
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+       return is_rethook_trampoline(addr);
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+                                     struct llist_node **cur)
+{
+       return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
+}
+#else
 static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
 {
        return (void *)addr == kretprobe_trampoline_addr();
@@ -516,6 +562,7 @@ static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
 
 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
                                      struct llist_node **cur);
+#endif
 #else
 static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
 {
index 7074aa9..0d61e07 100644 (file)
@@ -25,8 +25,6 @@ struct badrange {
 };
 
 enum {
-       /* when a dimm supports both PMEM and BLK access a label is required */
-       NDD_ALIASING = 0,
        /* unarmed memory devices may not persist writes */
        NDD_UNARMED = 1,
        /* locked memory devices should not be accessed */
@@ -35,8 +33,6 @@ enum {
        NDD_SECURITY_OVERWRITE = 3,
        /*  tracking whether or not there is a pending device reference */
        NDD_WORK_PENDING = 4,
-       /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
-       NDD_NOBLK = 5,
        /* dimm supports namespace labels */
        NDD_LABELING = 6,
 
@@ -140,21 +136,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
 }
 
 struct nvdimm_bus;
-struct module;
-struct nd_blk_region;
-struct nd_blk_region_desc {
-       int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
-       int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
-                       void *iobuf, u64 len, int rw);
-       struct nd_region_desc ndr_desc;
-};
-
-static inline struct nd_blk_region_desc *to_blk_region_desc(
-               struct nd_region_desc *ndr_desc)
-{
-       return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
-
-}
 
 /*
  * Note that separate bits for locked + unlocked are defined so that
@@ -257,7 +238,6 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
 struct nvdimm *to_nvdimm(struct device *dev);
 struct nd_region *to_nd_region(struct device *dev);
 struct device *nd_region_dev(struct nd_region *nd_region);
-struct nd_blk_region *to_nd_blk_region(struct device *dev);
 struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
 struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
 const char *nvdimm_name(struct nvdimm *nvdimm);
@@ -295,10 +275,6 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc);
 void *nd_region_provider_data(struct nd_region *nd_region);
-void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
-void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
-struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
-unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
 unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
 u64 nd_fletcher64(void *addr, size_t len, bool le);
index 808bb4c..b0da04f 100644 (file)
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
    /* 2 values for divider stage reset, others for "testing purposes only" */
 #  define RTC_DIV_RESET1       0x60
 #  define RTC_DIV_RESET2       0x70
+   /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */
+#  define RTC_AMD_BANK_SELECT  0x10
   /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
 # define RTC_RATE_SELECT       0x0F
 
index 8a8c63e..b9771ba 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/ndctl.h>
 #include <linux/device.h>
 #include <linux/badblocks.h>
+#include <linux/perf_event.h>
 
 enum nvdimm_event {
        NVDIMM_REVALIDATE_POISON,
@@ -23,6 +24,57 @@ enum nvdimm_claim_class {
        NVDIMM_CCLASS_UNKNOWN,
 };
 
+#define NVDIMM_EVENT_VAR(_id)  event_attr_##_id
+#define NVDIMM_EVENT_PTR(_id)  (&event_attr_##_id.attr.attr)
+
+#define NVDIMM_EVENT_ATTR(_name, _id)                          \
+       PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id,       \
+                       nvdimm_events_sysfs_show)
+
+/* Event attribute array index */
+#define NVDIMM_PMU_FORMAT_ATTR 0
+#define NVDIMM_PMU_EVENT_ATTR  1
+#define NVDIMM_PMU_CPUMASK_ATTR        2
+#define NVDIMM_PMU_NULL_ATTR   3
+
+/**
+ * struct nvdimm_pmu - data structure for nvdimm perf driver
+ * @pmu: pmu data structure for nvdimm performance stats.
+ * @dev: nvdimm device pointer.
+ * @cpu: designated cpu for counter access.
+ * @node: node for cpu hotplug notifier link.
+ * @cpuhp_state: state for cpu hotplug notification.
+ * @arch_cpumask: cpumask to get designated cpu for counter access.
+ */
+struct nvdimm_pmu {
+       struct pmu pmu;
+       struct device *dev;
+       int cpu;
+       struct hlist_node node;
+       enum cpuhp_state cpuhp_state;
+       /* cpumask provided by arch/platform specific code */
+       struct cpumask arch_cpumask;
+};
+
+struct platform_device;
+
+#ifdef CONFIG_PERF_EVENTS
+extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *page);
+
+int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
+void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);
+
+#else
+static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
+{
+       return -ENXIO;
+}
+
+static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
+#endif
+
 struct nd_device_driver {
        struct device_driver drv;
        unsigned long type;
@@ -92,27 +144,6 @@ struct nd_namespace_pmem {
        int id;
 };
 
-/**
- * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
- * @alt_name: namespace name supplied in the dimm label
- * @uuid: namespace name supplied in the dimm label
- * @id: ida allocated id
- * @lbasize: blk namespaces have a native sector size when btt not present
- * @size: sum of all the resource ranges allocated to this namespace
- * @num_resources: number of dpa extents to claim
- * @res: discontiguous dpa extents for given dimm
- */
-struct nd_namespace_blk {
-       struct nd_namespace_common common;
-       char *alt_name;
-       uuid_t *uuid;
-       int id;
-       unsigned long lbasize;
-       resource_size_t size;
-       int num_resources;
-       struct resource **res;
-};
-
 static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
 {
        return container_of(dev, struct nd_namespace_io, common.dev);
@@ -125,11 +156,6 @@ static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device
        return container_of(nsio, struct nd_namespace_pmem, nsio);
 }
 
-static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
-{
-       return container_of(dev, struct nd_namespace_blk, common.dev);
-}
-
 /**
  * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
  * @ndns: device to read
index 614f222..c7bf1ea 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 
+enum netfs_sreq_ref_trace;
+
 /*
  * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
  * a page is currently backed by a local disk cache
@@ -106,7 +108,7 @@ static inline int wait_on_page_fscache_killable(struct page *page)
        return folio_wait_private_2_killable(page_folio(page));
 }
 
-enum netfs_read_source {
+enum netfs_io_source {
        NETFS_FILL_WITH_ZEROES,
        NETFS_DOWNLOAD_FROM_SERVER,
        NETFS_READ_FROM_CACHE,
@@ -116,6 +118,17 @@ enum netfs_read_source {
 typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
                                      bool was_async);
 
+/*
+ * Per-inode description.  This must be directly after the inode struct.
+ */
+struct netfs_i_context {
+       const struct netfs_request_ops *ops;
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct fscache_cookie   *cache;
+#endif
+       loff_t                  remote_i_size;  /* Size of the remote file */
+};
+
 /*
  * Resources required to do operations on a cache.
  */
@@ -130,69 +143,75 @@ struct netfs_cache_resources {
 /*
  * Descriptor for a single component subrequest.
  */
-struct netfs_read_subrequest {
-       struct netfs_read_request *rreq;        /* Supervising read request */
+struct netfs_io_subrequest {
+       struct netfs_io_request *rreq;          /* Supervising I/O request */
        struct list_head        rreq_link;      /* Link in rreq->subrequests */
        loff_t                  start;          /* Where to start the I/O */
        size_t                  len;            /* Size of the I/O */
        size_t                  transferred;    /* Amount of data transferred */
-       refcount_t              usage;
+       refcount_t              ref;
        short                   error;          /* 0 or error that occurred */
        unsigned short          debug_index;    /* Index in list (for debugging output) */
-       enum netfs_read_source  source;         /* Where to read from */
+       enum netfs_io_source    source;         /* Where to read from/write to */
        unsigned long           flags;
-#define NETFS_SREQ_WRITE_TO_CACHE      0       /* Set if should write to cache */
+#define NETFS_SREQ_COPY_TO_CACHE       0       /* Set if should copy the data to the cache */
 #define NETFS_SREQ_CLEAR_TAIL          1       /* Set if the rest of the read should be cleared */
-#define NETFS_SREQ_SHORT_READ          2       /* Set if there was a short read from the cache */
+#define NETFS_SREQ_SHORT_IO            2       /* Set if the I/O was short */
 #define NETFS_SREQ_SEEK_DATA_READ      3       /* Set if ->read() should SEEK_DATA first */
 #define NETFS_SREQ_NO_PROGRESS         4       /* Set if we didn't manage to read any data */
 };
 
+enum netfs_io_origin {
+       NETFS_READAHEAD,                /* This read was triggered by readahead */
+       NETFS_READPAGE,                 /* This read is a synchronous read */
+       NETFS_READ_FOR_WRITE,           /* This read is to prepare a write */
+} __mode(byte);
+
 /*
- * Descriptor for a read helper request.  This is used to make multiple I/O
- * requests on a variety of sources and then stitch the result together.
+ * Descriptor for an I/O helper request.  This is used to make multiple I/O
+ * operations to a variety of data stores and then stitch the result together.
  */
-struct netfs_read_request {
+struct netfs_io_request {
        struct work_struct      work;
        struct inode            *inode;         /* The file being accessed */
        struct address_space    *mapping;       /* The mapping being accessed */
        struct netfs_cache_resources cache_resources;
-       struct list_head        subrequests;    /* Requests to fetch I/O from disk or net */
+       struct list_head        subrequests;    /* Contributory I/O operations */
        void                    *netfs_priv;    /* Private data for the netfs */
        unsigned int            debug_id;
-       atomic_t                nr_rd_ops;      /* Number of read ops in progress */
-       atomic_t                nr_wr_ops;      /* Number of write ops in progress */
+       atomic_t                nr_outstanding; /* Number of ops in progress */
+       atomic_t                nr_copy_ops;    /* Number of copy-to-cache ops in progress */
        size_t                  submitted;      /* Amount submitted for I/O so far */
        size_t                  len;            /* Length of the request */
        short                   error;          /* 0 or error that occurred */
+       enum netfs_io_origin    origin;         /* Origin of the request */
        loff_t                  i_size;         /* Size of the file */
        loff_t                  start;          /* Start position */
        pgoff_t                 no_unlock_folio; /* Don't unlock this folio after read */
-       refcount_t              usage;
+       refcount_t              ref;
        unsigned long           flags;
 #define NETFS_RREQ_INCOMPLETE_IO       0       /* Some ioreqs terminated short or with error */
-#define NETFS_RREQ_WRITE_TO_CACHE      1       /* Need to write to the cache */
+#define NETFS_RREQ_COPY_TO_CACHE       1       /* Need to write to the cache */
 #define NETFS_RREQ_NO_UNLOCK_FOLIO     2       /* Don't unlock no_unlock_folio on completion */
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS  3       /* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED              4       /* The request failed */
 #define NETFS_RREQ_IN_PROGRESS         5       /* Unlocked when the request completes */
-       const struct netfs_read_request_ops *netfs_ops;
+       const struct netfs_request_ops *netfs_ops;
 };
 
 /*
  * Operations the network filesystem can/must provide to the helpers.
  */
-struct netfs_read_request_ops {
-       bool (*is_cache_enabled)(struct inode *inode);
-       void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
-       int (*begin_cache_operation)(struct netfs_read_request *rreq);
-       void (*expand_readahead)(struct netfs_read_request *rreq);
-       bool (*clamp_length)(struct netfs_read_subrequest *subreq);
-       void (*issue_op)(struct netfs_read_subrequest *subreq);
-       bool (*is_still_valid)(struct netfs_read_request *rreq);
+struct netfs_request_ops {
+       int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+       int (*begin_cache_operation)(struct netfs_io_request *rreq);
+       void (*expand_readahead)(struct netfs_io_request *rreq);
+       bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+       void (*issue_read)(struct netfs_io_subrequest *subreq);
+       bool (*is_still_valid)(struct netfs_io_request *rreq);
        int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                 struct folio *folio, void **_fsdata);
-       void (*done)(struct netfs_read_request *rreq);
+       void (*done)(struct netfs_io_request *rreq);
        void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
 
@@ -235,7 +254,7 @@ struct netfs_cache_ops {
        /* Prepare a read operation, shortening it to a cached/uncached
         * boundary as appropriate.
         */
-       enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
+       enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
                                               loff_t i_size);
 
        /* Prepare a write operation, working out what part of the write we can
@@ -254,20 +273,89 @@ struct netfs_cache_ops {
 };
 
 struct readahead_control;
-extern void netfs_readahead(struct readahead_control *,
-                           const struct netfs_read_request_ops *,
-                           void *);
-extern int netfs_readpage(struct file *,
-                         struct folio *,
-                         const struct netfs_read_request_ops *,
-                         void *);
+extern void netfs_readahead(struct readahead_control *);
+extern int netfs_readpage(struct file *, struct page *);
 extern int netfs_write_begin(struct file *, struct address_space *,
                             loff_t, unsigned int, unsigned int, struct folio **,
-                            void **,
-                            const struct netfs_read_request_ops *,
-                            void *);
+                            void **);
 
-extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool);
+extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+                                enum netfs_sreq_ref_trace what);
+extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+                                bool was_async, enum netfs_sreq_ref_trace what);
 extern void netfs_stats_show(struct seq_file *);
 
+/**
+ * netfs_i_context - Get the netfs inode context from the inode
+ * @inode: The inode to query
+ *
+ * Get the netfs lib inode context from the network filesystem's inode.  The
+ * context struct is expected to directly follow on from the VFS inode struct.
+ */
+static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
+{
+       return (struct netfs_i_context *)(inode + 1);
+}
+
+/**
+ * netfs_inode - Get the netfs inode from the inode context
+ * @ctx: The context to query
+ *
+ * Get the netfs inode from the netfs library's inode context.  The VFS inode
+ * is expected to directly precede the context struct.
+ */
+static inline struct inode *netfs_inode(struct netfs_i_context *ctx)
+{
+       return ((struct inode *)ctx) - 1;
+}
+
+/**
+ * netfs_i_context_init - Initialise a netfs lib context
+ * @inode: The inode with which the context is associated
+ * @ops: The netfs's operations list
+ *
+ * Initialise the netfs library context struct.  This is expected to follow on
+ * directly from the VFS inode struct.
+ */
+static inline void netfs_i_context_init(struct inode *inode,
+                                       const struct netfs_request_ops *ops)
+{
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->ops = ops;
+       ctx->remote_i_size = i_size_read(inode);
+}
+
+/**
+ * netfs_resize_file - Note that a file got resized
+ * @inode: The inode being resized
+ * @new_i_size: The new file size
+ *
+ * Inform the netfs lib that a file got resized so that it can adjust its state.
+ */
+static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
+{
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+
+       ctx->remote_i_size = new_i_size;
+}
+
+/**
+ * netfs_i_cookie - Get the cache cookie from the inode
+ * @inode: The inode to query
+ *
+ * Get the caching cookie (if enabled) from the network filesystem's inode.
+ */
+static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+       return ctx->cache;
+#else
+       return NULL;
+#endif
+}
+
 #endif /* _LINUX_NETFS_H */
index e0600e1..7c943f0 100644 (file)
@@ -523,6 +523,7 @@ struct rproc_dump_segment {
  * @table_sz: size of @cached_table
  * @has_iommu: flag to indicate if remote processor is behind an MMU
  * @auto_boot: flag to indicate if remote processor should be auto-started
+ * @sysfs_read_only: flag to make remoteproc sysfs files read only
  * @dump_segments: list of segments in the firmware
  * @nb_vdev: number of vdev currently handled by rproc
  * @elf_class: firmware ELF class
@@ -562,6 +563,7 @@ struct rproc {
        size_t table_sz;
        bool has_iommu;
        bool auto_boot;
+       bool sysfs_read_only;
        struct list_head dump_segments;
        int nb_vdev;
        u8 elf_class;
@@ -669,10 +671,11 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
                             u32 da, const char *name, ...);
 
 int rproc_boot(struct rproc *rproc);
-void rproc_shutdown(struct rproc *rproc);
+int rproc_shutdown(struct rproc *rproc);
 int rproc_detach(struct rproc *rproc);
 int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
 void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
 void rproc_coredump_using_sections(struct rproc *rproc);
 int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
 int rproc_coredump_add_custom_segment(struct rproc *rproc,
index 47fd1c2..1fd9c6a 100644 (file)
@@ -110,8 +110,6 @@ struct rtc_device {
        struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
        int pie_enabled;
        struct work_struct irqwork;
-       /* Some hardware can't support UIE mode */
-       int uie_unsupported;
 
        /*
         * This offset specifies the update timing of the RTC.
index 67ee9d2..5a41c3b 100644 (file)
@@ -46,7 +46,6 @@ struct ds1685_priv {
        u32 regstep;
        int irq_num;
        bool bcd_mode;
-       bool no_irq;
        u8 (*read)(struct ds1685_priv *, int);
        void (*write)(struct ds1685_priv *, int, u8);
        void (*prepare_poweroff)(void);
index 860dd8c..82c9d48 100644 (file)
@@ -40,6 +40,7 @@ struct qcom_smd_rpm;
 #define QCOM_SMD_RPM_AGGR_CLK  0x72676761
 #define QCOM_SMD_RPM_HWKM_CLK  0x6d6b7768
 #define QCOM_SMD_RPM_PKA_CLK   0x616b70
+#define QCOM_SMD_RPM_MCFG_CLK  0x6766636d
 
 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
                       int state,
index 721089b..8943a20 100644 (file)
@@ -83,7 +83,7 @@ struct vdpa_device {
        unsigned int index;
        bool features_valid;
        bool use_va;
-       int nvqs;
+       u32 nvqs;
        struct vdpa_mgmt_dev *mdev;
 };
 
@@ -207,7 +207,8 @@ struct vdpa_map_file {
  * @reset:                     Reset device
  *                             @vdev: vdpa device
  *                             Returns integer: success (0) or error (< 0)
- * @get_config_size:           Get the size of the configuration space
+ * @get_config_size:           Get the size of the configuration space includes
+ *                             fields that are conditional on feature bits.
  *                             @vdev: vdpa device
  *                             Returns size_t: configuration size
  * @get_config:                        Read from device specific configuration space
@@ -337,10 +338,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                       dev_struct, member)), name, use_va), \
                                       dev_struct, member)
 
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void vdpa_unregister_device(struct vdpa_device *vdev);
 
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void _vdpa_unregister_device(struct vdpa_device *vdev);
 
 /**
index bb52b78..72feab5 100644 (file)
@@ -9,6 +9,7 @@
  * See Documentation/core-api/xarray.rst for how to use the XArray.
  */
 
+#include <linux/bitmap.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/gfp.h>
index 314f277..6b99310 100644 (file)
@@ -402,6 +402,7 @@ struct snd_pcm_runtime {
        struct fasync_struct *fasync;
        bool stop_operating;            /* sync_stop will be called */
        struct mutex buffer_mutex;      /* protect for buffer changes */
+       atomic_t buffer_accessing;      /* >0: in r/w operation, <0: blocked */
 
        /* -- private section -- */
        void *private_data;
index 2c53063..311c14a 100644 (file)
@@ -426,8 +426,8 @@ TRACE_EVENT(cachefiles_vol_coherency,
            );
 
 TRACE_EVENT(cachefiles_prep_read,
-           TP_PROTO(struct netfs_read_subrequest *sreq,
-                    enum netfs_read_source source,
+           TP_PROTO(struct netfs_io_subrequest *sreq,
+                    enum netfs_io_source source,
                     enum cachefiles_prepare_read_trace why,
                     ino_t cache_inode),
 
@@ -437,7 +437,7 @@ TRACE_EVENT(cachefiles_prep_read,
                    __field(unsigned int,               rreq            )
                    __field(unsigned short,             index           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum cachefiles_prepare_read_trace, why     )
                    __field(size_t,                     len             )
                    __field(loff_t,                     start           )
index e6f4ebb..beec534 100644 (file)
 /*
  * Define enums for tracing information.
  */
-#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-
-enum netfs_read_trace {
-       netfs_read_trace_expanded,
-       netfs_read_trace_readahead,
-       netfs_read_trace_readpage,
-       netfs_read_trace_write_begin,
-};
-
-enum netfs_rreq_trace {
-       netfs_rreq_trace_assess,
-       netfs_rreq_trace_done,
-       netfs_rreq_trace_free,
-       netfs_rreq_trace_resubmit,
-       netfs_rreq_trace_unlock,
-       netfs_rreq_trace_unmark,
-       netfs_rreq_trace_write,
-};
-
-enum netfs_sreq_trace {
-       netfs_sreq_trace_download_instead,
-       netfs_sreq_trace_free,
-       netfs_sreq_trace_prepare,
-       netfs_sreq_trace_resubmit_short,
-       netfs_sreq_trace_submit,
-       netfs_sreq_trace_terminated,
-       netfs_sreq_trace_write,
-       netfs_sreq_trace_write_skip,
-       netfs_sreq_trace_write_term,
-};
-
-enum netfs_failure {
-       netfs_fail_check_write_begin,
-       netfs_fail_copy_to_cache,
-       netfs_fail_read,
-       netfs_fail_short_readpage,
-       netfs_fail_short_write_begin,
-       netfs_fail_prepare_write,
-};
-
-#endif
-
 #define netfs_read_traces                                      \
        EM(netfs_read_trace_expanded,           "EXPANDED ")    \
        EM(netfs_read_trace_readahead,          "READAHEAD")    \
        EM(netfs_read_trace_readpage,           "READPAGE ")    \
        E_(netfs_read_trace_write_begin,        "WRITEBEGN")
 
+#define netfs_rreq_origins                                     \
+       EM(NETFS_READAHEAD,                     "RA")           \
+       EM(NETFS_READPAGE,                      "RP")           \
+       E_(NETFS_READ_FOR_WRITE,                "RW")
+
 #define netfs_rreq_traces                                      \
-       EM(netfs_rreq_trace_assess,             "ASSESS")       \
-       EM(netfs_rreq_trace_done,               "DONE  ")       \
-       EM(netfs_rreq_trace_free,               "FREE  ")       \
-       EM(netfs_rreq_trace_resubmit,           "RESUBM")       \
-       EM(netfs_rreq_trace_unlock,             "UNLOCK")       \
-       EM(netfs_rreq_trace_unmark,             "UNMARK")       \
-       E_(netfs_rreq_trace_write,              "WRITE ")
+       EM(netfs_rreq_trace_assess,             "ASSESS ")      \
+       EM(netfs_rreq_trace_copy,               "COPY   ")      \
+       EM(netfs_rreq_trace_done,               "DONE   ")      \
+       EM(netfs_rreq_trace_free,               "FREE   ")      \
+       EM(netfs_rreq_trace_resubmit,           "RESUBMT")      \
+       EM(netfs_rreq_trace_unlock,             "UNLOCK ")      \
+       E_(netfs_rreq_trace_unmark,             "UNMARK ")
 
 #define netfs_sreq_sources                                     \
        EM(NETFS_FILL_WITH_ZEROES,              "ZERO")         \
@@ -94,10 +56,47 @@ enum netfs_failure {
        EM(netfs_fail_check_write_begin,        "check-write-begin")    \
        EM(netfs_fail_copy_to_cache,            "copy-to-cache")        \
        EM(netfs_fail_read,                     "read")                 \
-       EM(netfs_fail_short_readpage,           "short-readpage")       \
-       EM(netfs_fail_short_write_begin,        "short-write-begin")    \
+       EM(netfs_fail_short_read,               "short-read")           \
        E_(netfs_fail_prepare_write,            "prep-write")
 
+#define netfs_rreq_ref_traces                                  \
+       EM(netfs_rreq_trace_get_hold,           "GET HOLD   ")  \
+       EM(netfs_rreq_trace_get_subreq,         "GET SUBREQ ")  \
+       EM(netfs_rreq_trace_put_complete,       "PUT COMPLT ")  \
+       EM(netfs_rreq_trace_put_discard,        "PUT DISCARD")  \
+       EM(netfs_rreq_trace_put_failed,         "PUT FAILED ")  \
+       EM(netfs_rreq_trace_put_hold,           "PUT HOLD   ")  \
+       EM(netfs_rreq_trace_put_subreq,         "PUT SUBREQ ")  \
+       EM(netfs_rreq_trace_put_zero_len,       "PUT ZEROLEN")  \
+       E_(netfs_rreq_trace_new,                "NEW        ")
+
+#define netfs_sreq_ref_traces                                  \
+       EM(netfs_sreq_trace_get_copy_to_cache,  "GET COPY2C ")  \
+       EM(netfs_sreq_trace_get_resubmit,       "GET RESUBMIT") \
+       EM(netfs_sreq_trace_get_short_read,     "GET SHORTRD")  \
+       EM(netfs_sreq_trace_new,                "NEW        ")  \
+       EM(netfs_sreq_trace_put_clear,          "PUT CLEAR  ")  \
+       EM(netfs_sreq_trace_put_failed,         "PUT FAILED ")  \
+       EM(netfs_sreq_trace_put_merged,         "PUT MERGED ")  \
+       EM(netfs_sreq_trace_put_no_copy,        "PUT NO COPY")  \
+       E_(netfs_sreq_trace_put_terminated,     "PUT TERM   ")
+
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
+enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
+enum netfs_failure { netfs_failures } __mode(byte);
+enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
+enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+
+#endif
 
 /*
  * Export enum symbols via userspace.
@@ -108,10 +107,13 @@ enum netfs_failure {
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
 netfs_read_traces;
+netfs_rreq_origins;
 netfs_rreq_traces;
 netfs_sreq_sources;
 netfs_sreq_traces;
 netfs_failures;
+netfs_rreq_ref_traces;
+netfs_sreq_ref_traces;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -123,7 +125,7 @@ netfs_failures;
 #define E_(a, b)       { a, b }
 
 TRACE_EVENT(netfs_read,
-           TP_PROTO(struct netfs_read_request *rreq,
+           TP_PROTO(struct netfs_io_request *rreq,
                     loff_t start, size_t len,
                     enum netfs_read_trace what),
 
@@ -156,31 +158,34 @@ TRACE_EVENT(netfs_read,
            );
 
 TRACE_EVENT(netfs_rreq,
-           TP_PROTO(struct netfs_read_request *rreq,
+           TP_PROTO(struct netfs_io_request *rreq,
                     enum netfs_rreq_trace what),
 
            TP_ARGS(rreq, what),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               rreq            )
-                   __field(unsigned short,             flags           )
+                   __field(unsigned int,               flags           )
+                   __field(enum netfs_io_origin,       origin          )
                    __field(enum netfs_rreq_trace,      what            )
                             ),
 
            TP_fast_assign(
                    __entry->rreq       = rreq->debug_id;
                    __entry->flags      = rreq->flags;
+                   __entry->origin     = rreq->origin;
                    __entry->what       = what;
                           ),
 
-           TP_printk("R=%08x %s f=%02x",
+           TP_printk("R=%08x %s %s f=%02x",
                      __entry->rreq,
+                     __print_symbolic(__entry->origin, netfs_rreq_origins),
                      __print_symbolic(__entry->what, netfs_rreq_traces),
                      __entry->flags)
            );
 
 TRACE_EVENT(netfs_sreq,
-           TP_PROTO(struct netfs_read_subrequest *sreq,
+           TP_PROTO(struct netfs_io_subrequest *sreq,
                     enum netfs_sreq_trace what),
 
            TP_ARGS(sreq, what),
@@ -190,7 +195,7 @@ TRACE_EVENT(netfs_sreq,
                    __field(unsigned short,             index           )
                    __field(short,                      error           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum netfs_sreq_trace,      what            )
                    __field(size_t,                     len             )
                    __field(size_t,                     transferred     )
@@ -211,26 +216,26 @@ TRACE_EVENT(netfs_sreq,
 
            TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
                      __entry->rreq, __entry->index,
-                     __print_symbolic(__entry->what, netfs_sreq_traces),
                      __print_symbolic(__entry->source, netfs_sreq_sources),
+                     __print_symbolic(__entry->what, netfs_sreq_traces),
                      __entry->flags,
                      __entry->start, __entry->transferred, __entry->len,
                      __entry->error)
            );
 
 TRACE_EVENT(netfs_failure,
-           TP_PROTO(struct netfs_read_request *rreq,
-                    struct netfs_read_subrequest *sreq,
+           TP_PROTO(struct netfs_io_request *rreq,
+                    struct netfs_io_subrequest *sreq,
                     int error, enum netfs_failure what),
 
            TP_ARGS(rreq, sreq, error, what),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               rreq            )
-                   __field(unsigned short,             index           )
+                   __field(short,                      index           )
                    __field(short,                      error           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum netfs_failure,         what            )
                    __field(size_t,                     len             )
                    __field(size_t,                     transferred     )
@@ -239,17 +244,17 @@ TRACE_EVENT(netfs_failure,
 
            TP_fast_assign(
                    __entry->rreq       = rreq->debug_id;
-                   __entry->index      = sreq ? sreq->debug_index : 0;
+                   __entry->index      = sreq ? sreq->debug_index : -1;
                    __entry->error      = error;
                    __entry->flags      = sreq ? sreq->flags : 0;
                    __entry->source     = sreq ? sreq->source : NETFS_INVALID_READ;
                    __entry->what       = what;
-                   __entry->len        = sreq ? sreq->len : 0;
+                   __entry->len        = sreq ? sreq->len : rreq->len;
                    __entry->transferred = sreq ? sreq->transferred : 0;
                    __entry->start      = sreq ? sreq->start : 0;
                           ),
 
-           TP_printk("R=%08x[%u] %s f=%02x s=%llx %zx/%zx %s e=%d",
+           TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
                      __entry->rreq, __entry->index,
                      __print_symbolic(__entry->source, netfs_sreq_sources),
                      __entry->flags,
@@ -258,6 +263,59 @@ TRACE_EVENT(netfs_failure,
                      __entry->error)
            );
 
+TRACE_EVENT(netfs_rreq_ref,
+           TP_PROTO(unsigned int rreq_debug_id, int ref,
+                    enum netfs_rreq_ref_trace what),
+
+           TP_ARGS(rreq_debug_id, ref, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               rreq            )
+                   __field(int,                        ref             )
+                   __field(enum netfs_rreq_ref_trace,  what            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rreq       = rreq_debug_id;
+                   __entry->ref        = ref;
+                   __entry->what       = what;
+                          ),
+
+           TP_printk("R=%08x %s r=%u",
+                     __entry->rreq,
+                     __print_symbolic(__entry->what, netfs_rreq_ref_traces),
+                     __entry->ref)
+           );
+
+TRACE_EVENT(netfs_sreq_ref,
+           TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index,
+                    int ref, enum netfs_sreq_ref_trace what),
+
+           TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               rreq            )
+                   __field(unsigned int,               subreq          )
+                   __field(int,                        ref             )
+                   __field(enum netfs_sreq_ref_trace,  what            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rreq       = rreq_debug_id;
+                   __entry->subreq     = subreq_debug_index;
+                   __entry->ref        = ref;
+                   __entry->what       = what;
+                          ),
+
+           TP_printk("R=%08x[%x] %s r=%u",
+                     __entry->rreq,
+                     __entry->subreq,
+                     __print_symbolic(__entry->what, netfs_sreq_ref_traces),
+                     __entry->ref)
+           );
+
+#undef EM
+#undef E_
 #endif /* _TRACE_NETFS_H */
 
 /* This part must be outside protection */
index e70c901..4a3ab0e 100644 (file)
@@ -83,12 +83,15 @@ enum rxrpc_call_trace {
        rxrpc_call_error,
        rxrpc_call_got,
        rxrpc_call_got_kernel,
+       rxrpc_call_got_timer,
        rxrpc_call_got_userid,
        rxrpc_call_new_client,
        rxrpc_call_new_service,
        rxrpc_call_put,
        rxrpc_call_put_kernel,
        rxrpc_call_put_noqueue,
+       rxrpc_call_put_notimer,
+       rxrpc_call_put_timer,
        rxrpc_call_put_userid,
        rxrpc_call_queued,
        rxrpc_call_queued_ref,
@@ -278,12 +281,15 @@ enum rxrpc_tx_point {
        EM(rxrpc_call_error,                    "*E*") \
        EM(rxrpc_call_got,                      "GOT") \
        EM(rxrpc_call_got_kernel,               "Gke") \
+       EM(rxrpc_call_got_timer,                "GTM") \
        EM(rxrpc_call_got_userid,               "Gus") \
        EM(rxrpc_call_new_client,               "NWc") \
        EM(rxrpc_call_new_service,              "NWs") \
        EM(rxrpc_call_put,                      "PUT") \
        EM(rxrpc_call_put_kernel,               "Pke") \
-       EM(rxrpc_call_put_noqueue,              "PNQ") \
+       EM(rxrpc_call_put_noqueue,              "PnQ") \
+       EM(rxrpc_call_put_notimer,              "PnT") \
+       EM(rxrpc_call_put_timer,                "PTM") \
        EM(rxrpc_call_put_userid,               "Pus") \
        EM(rxrpc_call_queued,                   "QUE") \
        EM(rxrpc_call_queued_ref,               "QUR") \
index 8cf1e48..17e02b6 100644 (file)
@@ -189,7 +189,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
 #define ND_DEVICE_REGION_BLK 3      /* nd_region: (parent of BLK namespaces) */
 #define ND_DEVICE_NAMESPACE_IO 4    /* legacy persistent memory */
 #define ND_DEVICE_NAMESPACE_PMEM 5  /* PMEM namespace (may alias with BLK) */
-#define ND_DEVICE_NAMESPACE_BLK 6   /* BLK namespace (may alias with PMEM) */
 #define ND_DEVICE_DAX_PMEM 7        /* Device DAX interface to pmem */
 
 enum nd_driver_flags {
@@ -198,7 +197,6 @@ enum nd_driver_flags {
        ND_DRIVER_REGION_BLK      = 1 << ND_DEVICE_REGION_BLK,
        ND_DRIVER_NAMESPACE_IO    = 1 << ND_DEVICE_NAMESPACE_IO,
        ND_DRIVER_NAMESPACE_PMEM  = 1 << ND_DEVICE_NAMESPACE_PMEM,
-       ND_DRIVER_NAMESPACE_BLK   = 1 << ND_DEVICE_NAMESPACE_BLK,
        ND_DRIVER_DAX_PMEM        = 1 << ND_DEVICE_DAX_PMEM,
 };
 
index f5ca874..1637e68 100644 (file)
@@ -33,4 +33,14 @@ struct rpmsg_endpoint_info {
  */
 #define RPMSG_DESTROY_EPT_IOCTL        _IO(0xb5, 0x2)
 
+/**
+ * Instantiate a new local rpmsg service device.
+ */
+#define RPMSG_CREATE_DEV_IOCTL _IOW(0xb5, 0x3, struct rpmsg_endpoint_info)
+
+/**
+ * Release a local rpmsg device.
+ */
+#define RPMSG_RELEASE_DEV_IOCTL        _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
+
 #endif
index 03e5b77..97aca45 100644 (file)
@@ -133,7 +133,8 @@ struct rtc_param {
 #define RTC_FEATURE_UPDATE_INTERRUPT   4
 #define RTC_FEATURE_CORRECTION         5
 #define RTC_FEATURE_BACKUP_SWITCH_MODE 6
-#define RTC_FEATURE_CNT                        7
+#define RTC_FEATURE_ALARM_WAKEUP_ONLY  7
+#define RTC_FEATURE_CNT                        8
 
 /* parameter list */
 #define RTC_PARAM_FEATURES             0
index c998860..5d99e7c 100644 (file)
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE      _IOR(VHOST_VIRTIO, 0x78, \
                                             struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
index b5eda06..f0fb0ae 100644 (file)
 /* This feature indicates support for the packed virtqueue layout. */
 #define VIRTIO_F_RING_PACKED           34
 
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER              35
+
 /*
  * This feature indicates that memory accesses by the driver and the
  * device are ordered in a way described by the platform.
index a03932f..71a54a6 100644 (file)
@@ -37,6 +37,7 @@
 #define VIRTIO_CRYPTO_SERVICE_HASH   1
 #define VIRTIO_CRYPTO_SERVICE_MAC    2
 #define VIRTIO_CRYPTO_SERVICE_AEAD   3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
 
 #define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))
 
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
 #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
        __le32 opcode;
        __le32 algo;
        __le32 flag;
@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
        __u8 padding[32];
 };
 
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING   0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+       __le32 padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH   0
+#define VIRTIO_CRYPTO_RSA_MD2       1
+#define VIRTIO_CRYPTO_RSA_MD3       2
+#define VIRTIO_CRYPTO_RSA_MD4       3
+#define VIRTIO_CRYPTO_RSA_MD5       4
+#define VIRTIO_CRYPTO_RSA_SHA1      5
+#define VIRTIO_CRYPTO_RSA_SHA256    6
+#define VIRTIO_CRYPTO_RSA_SHA384    7
+#define VIRTIO_CRYPTO_RSA_SHA512    8
+#define VIRTIO_CRYPTO_RSA_SHA224    9
+       __le32 hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN   0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+       __le32 curve_id;
+       __le32 padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER    0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA   1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA   2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+       __le32 algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC  1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+       __le32 keytype;
+       __le32 keylen;
+
+       union {
+               struct virtio_crypto_rsa_session_para rsa;
+               struct virtio_crypto_ecdsa_session_para ecdsa;
+       } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+       struct virtio_crypto_akcipher_session_para para;
+       __u8 padding[36];
+};
+
 struct virtio_crypto_alg_chain_session_para {
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
                        mac_create_session;
                struct virtio_crypto_aead_create_session_req
                        aead_create_session;
+               struct virtio_crypto_akcipher_create_session_req
+                       akcipher_create_session;
                struct virtio_crypto_destroy_session_req
                        destroy_session;
                __u8 padding[56];
@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
 #define VIRTIO_CRYPTO_AEAD_DECRYPT \
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
        __le32 opcode;
        /* algo should be service-specific algorithms */
        __le32 algo;
@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
        __u8 padding[32];
 };
 
+struct virtio_crypto_akcipher_para {
+       __le32 src_data_len;
+       __le32 dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+       struct virtio_crypto_akcipher_para para;
+       __u8 padding[40];
+};
+
 /* The request of the data virtqueue's packet */
 struct virtio_crypto_op_data_req {
        struct virtio_crypto_op_header header;
@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
                struct virtio_crypto_hash_data_req hash_req;
                struct virtio_crypto_mac_data_req mac_req;
                struct virtio_crypto_aead_data_req aead_req;
+               struct virtio_crypto_akcipher_data_req akcipher_req;
                __u8 padding[48];
        } u;
 };
@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req {
 #define VIRTIO_CRYPTO_BADMSG    2
 #define VIRTIO_CRYPTO_NOTSUPP   3
 #define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC     5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
 
 /* The accelerator hardware is ready */
 #define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)
@@ -438,7 +518,7 @@ struct virtio_crypto_config {
        __le32 max_cipher_key_len;
        /* Maximum length of authenticated key */
        __le32 max_auth_key_len;
-       __le32 reserve;
+       __le32 akcipher_algo;
        /* Maximum size of each crypto request's content */
        __le64 max_size;
 };
index 97463a3..ddcbefe 100644 (file)
@@ -62,13 +62,13 @@ config LLD_VERSION
 
 config CC_CAN_LINK
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag)) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag))
 
 config CC_CAN_LINK_STATIC
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag) -static) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag) -static)
 
 config CC_HAS_ASM_GOTO
        def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
index 56f4ee9..471d719 100644 (file)
@@ -108,6 +108,7 @@ obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_TRACE_CLOCK) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
+obj-$(CONFIG_RETHOOK) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
 obj-$(CONFIG_BPF) += bpf/
index 24788ce..0918a39 100644 (file)
@@ -5507,7 +5507,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
        }
        args = (const struct btf_param *)(func + 1);
        nargs = btf_type_vlen(func);
-       if (nargs >= MAX_BPF_FUNC_ARGS) {
+       if (nargs > MAX_BPF_FUNC_ARGS) {
                bpf_log(log,
                        "The function %s has %d arguments. Too many.\n",
                        tname, nargs);
index 185badc..dbe57df 100644 (file)
@@ -1237,6 +1237,27 @@ void kprobes_inc_nmissed_count(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
 
+static struct kprobe kprobe_busy = {
+       .addr = (void *) get_kprobe,
+};
+
+void kprobe_busy_begin(void)
+{
+       struct kprobe_ctlblk *kcb;
+
+       preempt_disable();
+       __this_cpu_write(current_kprobe, &kprobe_busy);
+       kcb = get_kprobe_ctlblk();
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void kprobe_busy_end(void)
+{
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable();
+}
+
+#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
 static void free_rp_inst_rcu(struct rcu_head *head)
 {
        struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
@@ -1258,26 +1279,6 @@ static void recycle_rp_inst(struct kretprobe_instance *ri)
 }
 NOKPROBE_SYMBOL(recycle_rp_inst);
 
-static struct kprobe kprobe_busy = {
-       .addr = (void *) get_kprobe,
-};
-
-void kprobe_busy_begin(void)
-{
-       struct kprobe_ctlblk *kcb;
-
-       preempt_disable();
-       __this_cpu_write(current_kprobe, &kprobe_busy);
-       kcb = get_kprobe_ctlblk();
-       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-}
-
-void kprobe_busy_end(void)
-{
-       __this_cpu_write(current_kprobe, NULL);
-       preempt_enable();
-}
-
 /*
  * This function is called from delayed_put_task_struct() when a task is
  * dead and cleaned up to recycle any kretprobe instances associated with
@@ -1327,6 +1328,7 @@ static inline void free_rp_inst(struct kretprobe *rp)
                rp->rph = NULL;
        }
 }
+#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
 
 /* Add the new probe to 'ap->list'. */
 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
@@ -1925,6 +1927,7 @@ static struct notifier_block kprobe_exceptions_nb = {
 
 #ifdef CONFIG_KRETPROBES
 
+#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
 /* This assumes the 'tsk' is the current task or the is not running. */
 static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
                                                  struct llist_node **cur)
@@ -2087,6 +2090,57 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
        return 0;
 }
 NOKPROBE_SYMBOL(pre_handler_kretprobe);
+#else /* CONFIG_KRETPROBE_ON_RETHOOK */
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When probe
+ * hits it will set up the return probe.
+ */
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+{
+       struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+       struct kretprobe_instance *ri;
+       struct rethook_node *rhn;
+
+       rhn = rethook_try_get(rp->rh);
+       if (!rhn) {
+               rp->nmissed++;
+               return 0;
+       }
+
+       ri = container_of(rhn, struct kretprobe_instance, node);
+
+       if (rp->entry_handler && rp->entry_handler(ri, regs))
+               rethook_recycle(rhn);
+       else
+               rethook_hook(rhn, regs, kprobe_ftrace(p));
+
+       return 0;
+}
+NOKPROBE_SYMBOL(pre_handler_kretprobe);
+
+static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
+                                     struct pt_regs *regs)
+{
+       struct kretprobe *rp = (struct kretprobe *)data;
+       struct kretprobe_instance *ri;
+       struct kprobe_ctlblk *kcb;
+
+       /* The data must NOT be null. This means rethook data structure is broken. */
+       if (WARN_ON_ONCE(!data))
+               return;
+
+       __this_cpu_write(current_kprobe, &rp->kp);
+       kcb = get_kprobe_ctlblk();
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+       ri = container_of(rh, struct kretprobe_instance, node);
+       rp->handler(ri, regs);
+
+       __this_cpu_write(current_kprobe, NULL);
+}
+NOKPROBE_SYMBOL(kretprobe_rethook_handler);
+
+#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
 
 /**
  * kprobe_on_func_entry() -- check whether given address is function entry
@@ -2155,6 +2209,29 @@ int register_kretprobe(struct kretprobe *rp)
                rp->maxactive = num_possible_cpus();
 #endif
        }
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+       rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler);
+       if (!rp->rh)
+               return -ENOMEM;
+
+       for (i = 0; i < rp->maxactive; i++) {
+               inst = kzalloc(sizeof(struct kretprobe_instance) +
+                              rp->data_size, GFP_KERNEL);
+               if (inst == NULL) {
+                       rethook_free(rp->rh);
+                       rp->rh = NULL;
+                       return -ENOMEM;
+               }
+               rethook_add_node(rp->rh, &inst->node);
+       }
+       rp->nmissed = 0;
+       /* Establish function entry probe point */
+       ret = register_kprobe(&rp->kp);
+       if (ret != 0) {
+               rethook_free(rp->rh);
+               rp->rh = NULL;
+       }
+#else  /* !CONFIG_KRETPROBE_ON_RETHOOK */
        rp->freelist.head = NULL;
        rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
        if (!rp->rph)
@@ -2179,6 +2256,7 @@ int register_kretprobe(struct kretprobe *rp)
        ret = register_kprobe(&rp->kp);
        if (ret != 0)
                free_rp_inst(rp);
+#endif
        return ret;
 }
 EXPORT_SYMBOL_GPL(register_kretprobe);
@@ -2217,7 +2295,11 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
        for (i = 0; i < num; i++) {
                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
                        rps[i]->kp.addr = NULL;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+               rethook_free(rps[i]->rh);
+#else
                rps[i]->rph->rp = NULL;
+#endif
        }
        mutex_unlock(&kprobe_mutex);
 
@@ -2225,7 +2307,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
        for (i = 0; i < num; i++) {
                if (rps[i]->kp.addr) {
                        __unregister_kprobe_bottom(&rps[i]->kp);
+#ifndef CONFIG_KRETPROBE_ON_RETHOOK
                        free_rp_inst(rps[i]);
+#endif
                }
        }
 }
index 8b2dd5b..89d9f99 100644 (file)
@@ -150,15 +150,15 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
 
        fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
        for (i = 0; i < size; i++) {
-               struct rethook_node *node;
+               struct fprobe_rethook_node *node;
 
-               node = kzalloc(sizeof(struct fprobe_rethook_node), GFP_KERNEL);
+               node = kzalloc(sizeof(*node), GFP_KERNEL);
                if (!node) {
                        rethook_free(fp->rethook);
                        fp->rethook = NULL;
                        return -ENOMEM;
                }
-               rethook_add_node(fp->rethook, node);
+               rethook_add_node(fp->rethook, &node->node);
        }
        return 0;
 }
@@ -215,7 +215,7 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter
         * correctly calculate the total number of filtered symbols
         * from both filter and notfilter.
         */
-       hash = fp->ops.local_hash.filter_hash;
+       hash = rcu_access_pointer(fp->ops.local_hash.filter_hash);
        if (WARN_ON_ONCE(!hash))
                goto out;
 
index b62fd78..47cebef 100644 (file)
@@ -1433,7 +1433,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
        fbuffer.regs = regs;
        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
        entry->func = (unsigned long)tk->rp.kp.addr;
-       entry->ret_ip = (unsigned long)ri->ret_addr;
+       entry->ret_ip = get_kretprobe_retaddr(ri);
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
 
        trace_event_buffer_commit(&fbuffer);
@@ -1628,7 +1628,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                return;
 
        entry->func = (unsigned long)tk->rp.kp.addr;
-       entry->ret_ip = (unsigned long)ri->ret_addr;
+       entry->ret_ip = get_kretprobe_retaddr(ri);
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
index 8c3365f..b247d41 100644 (file)
@@ -68,7 +68,7 @@ int logic_iomem_add_region(struct resource *resource,
 }
 EXPORT_SYMBOL(logic_iomem_add_region);
 
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
 static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
 {
        WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
@@ -81,7 +81,7 @@ static void real_iounmap(volatile void __iomem *addr)
        WARN(1, "invalid iounmap for addr 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
 }
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
 
 void __iomem *ioremap(phys_addr_t offset, size_t size)
 {
@@ -168,7 +168,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
 #define MAKE_FALLBACK(op, sz)                                          \
 static u##sz real_raw_read ## op(const volatile void __iomem *addr)    \
 {                                                                      \
@@ -213,7 +213,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
        WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
 }
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
 
 #define MAKE_OP(op, sz)                                                \
 u##sz __raw_read ## op(const volatile void __iomem *addr)              \
index ce15893..cb800b1 100644 (file)
@@ -1149,6 +1149,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
        if (ret) {
                pr_err("could not register misc device: %d\n", ret);
                free_test_dev_kmod(test_dev);
+               test_dev = NULL;
                goto out;
        }
 
index 8b1c318..e77d485 100644 (file)
@@ -1463,6 +1463,25 @@ unlock:
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_create_range_5(struct xarray *xa,
+               unsigned long index, unsigned int order)
+{
+       XA_STATE_ORDER(xas, xa, index, order);
+       unsigned int i;
+
+       xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+       for (i = 0; i < order + 10; i++) {
+               do {
+                       xas_lock(&xas);
+                       xas_create_range(&xas);
+                       xas_unlock(&xas);
+               } while (xas_nomem(&xas, GFP_KERNEL));
+       }
+
+       xa_destroy(xa);
+}
+
 static noinline void check_create_range(struct xarray *xa)
 {
        unsigned int order;
@@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa)
                check_create_range_4(xa, (3U << order) + 1, order);
                check_create_range_4(xa, (3U << order) - 1, order);
                check_create_range_4(xa, (1U << 24) + 1, order);
+
+               check_create_range_5(xa, 0, order);
+               check_create_range_5(xa, (1U << order), order);
        }
 
        check_create_range_3();
index b95e925..4acc88e 100644 (file)
@@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas)
 
                for (;;) {
                        struct xa_node *node = xas->xa_node;
+                       if (node->shift >= shift)
+                               break;
                        xas->xa_node = xa_parent_locked(xas->xa, node);
                        xas->xa_offset = node->offset - 1;
                        if (node->offset != 0)
@@ -1079,6 +1081,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
                                        xa_mk_node(child));
                        if (xa_is_value(curr))
                                values--;
+                       xas_update(xas, child);
                } else {
                        unsigned int canon = offset - xas->xa_sibs;
 
@@ -1093,6 +1096,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
        } while (offset-- > xas->xa_offset);
 
        node->nr_values += values;
+       xas_update(xas, node);
 }
 EXPORT_SYMBOL_GPL(xas_split);
 #endif
index 907fefd..4b8eab4 100644 (file)
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);
 
 #ifdef CONFIG_BALLOON_COMPACTION
 
-bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
+static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
 
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
@@ -217,7 +217,7 @@ bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
        return true;
 }
 
-void balloon_page_putback(struct page *page)
+static void balloon_page_putback(struct page *page)
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;
@@ -230,7 +230,7 @@ void balloon_page_putback(struct page *page)
 
 
 /* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct address_space *mapping,
+static int balloon_page_migrate(struct address_space *mapping,
                struct page *newpage, struct page *page,
                enum migrate_mode mode)
 {
index c1e0fed..5ce8d7c 100644 (file)
@@ -1019,12 +1019,15 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
        struct damos *s;
        unsigned long wait_time;
        unsigned long min_wait_time = 0;
+       bool init_wait_time = false;
 
        while (!kdamond_need_stop(ctx)) {
                damon_for_each_scheme(s, ctx) {
                        wait_time = damos_wmark_wait_us(s);
-                       if (!min_wait_time || wait_time < min_wait_time)
+                       if (!init_wait_time || wait_time < min_wait_time) {
+                               init_wait_time = true;
                                min_wait_time = wait_time;
+                       }
                }
                if (!min_wait_time)
                        return 0;
index 271fbe8..f598a03 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1404,6 +1404,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
+       long ret;
 
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1438,8 +1439,10 @@ long populate_vma_page_range(struct vm_area_struct *vma,
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
-       return __get_user_pages(mm, start, nr_pages, gup_flags,
+       ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
+       lru_add_drain();
+       return ret;
 }
 
 /*
@@ -1471,6 +1474,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
+       long ret;
 
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1498,8 +1502,10 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
        if (check_vma_flags(vma, gup_flags))
                return -EINVAL;
 
-       return __get_user_pages(mm, start, nr_pages, gup_flags,
+       ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
+       lru_add_drain();
+       return ret;
 }
 
 /*
index 58dc6ad..cf16280 100644 (file)
@@ -456,7 +456,8 @@ static inline void munlock_vma_page(struct page *page,
 }
 void mlock_new_page(struct page *page);
 bool need_mlock_page_drain(int cpu);
-void mlock_page_drain(int cpu);
+void mlock_page_drain_local(void);
+void mlock_page_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -539,7 +540,8 @@ static inline void munlock_vma_page(struct page *page,
                        struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_page(struct page *page) { }
 static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain(int cpu) { }
+static inline void mlock_page_drain_local(void) { }
+static inline void mlock_page_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
index 2f9fdfd..a203747 100644 (file)
@@ -566,6 +566,8 @@ static unsigned long kfence_init_pool(void)
         * enters __slab_free() slow-path.
         */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+               struct slab *slab = page_slab(&pages[i]);
+
                if (!i || (i % 2))
                        continue;
 
@@ -573,7 +575,11 @@ static unsigned long kfence_init_pool(void)
                if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
                        return addr;
 
-               __SetPageSlab(&pages[i]);
+               __folio_set_slab(slab_folio(slab));
+#ifdef CONFIG_MEMCG
+               slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+                                  MEMCG_DATA_OBJCGS;
+#endif
        }
 
        /*
@@ -1033,6 +1039,9 @@ void __kfence_free(void *addr)
 {
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
 
+#ifdef CONFIG_MEMCG
+       KFENCE_WARN_ON(meta->objcg);
+#endif
        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
index 2a2d5de..9a6c4b1 100644 (file)
@@ -89,6 +89,9 @@ struct kfence_metadata {
        struct kfence_track free_track;
        /* For updating alloc_covered on frees. */
        u32 alloc_stack_hash;
+#ifdef CONFIG_MEMCG
+       struct obj_cgroup *objcg;
+#endif
 };
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
index 7580baa..acd7cbb 100644 (file)
@@ -796,6 +796,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area = NULL;
+       unsigned long untagged_ptr;
+       unsigned long untagged_objp;
 
        object = find_and_get_object(ptr, 1);
        if (!object) {
@@ -804,6 +806,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                return;
        }
 
+       untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+       untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
+
        if (scan_area_cache)
                area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
@@ -815,8 +820,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                goto out_unlock;
        }
        if (size == SIZE_MAX) {
-               size = object->pointer + object->size - ptr;
-       } else if (ptr + size > object->pointer + object->size) {
+               size = untagged_objp + object->size - untagged_ptr;
+       } else if (untagged_ptr + size > untagged_objp + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
index b41858e..1873616 100644 (file)
@@ -1464,16 +1464,9 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
 
        while (iov_iter_count(&iter)) {
                iovec = iov_iter_iovec(&iter);
-               /*
-                * do_madvise returns ENOMEM if unmapped holes are present
-                * in the passed VMA. process_madvise() is expected to skip
-                * unmapped holes passed to it in the 'struct iovec' list
-                * and not fail because of them. Thus treat -ENOMEM return
-                * from do_madvise as valid and continue processing.
-                */
                ret = do_madvise(mm, (unsigned long)iovec.iov_base,
                                        iovec.iov_len, behavior);
-               if (ret < 0 && ret != -ENOMEM)
+               if (ret < 0)
                        break;
                iov_iter_advance(&iter, iovec.iov_len);
        }
index be44d0b..76e3af9 100644 (file)
@@ -3918,14 +3918,18 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                return ret;
 
        if (unlikely(PageHWPoison(vmf->page))) {
+               struct page *page = vmf->page;
                vm_fault_t poisonret = VM_FAULT_HWPOISON;
                if (ret & VM_FAULT_LOCKED) {
+                       if (page_mapped(page))
+                               unmap_mapping_pages(page_mapping(page),
+                                                   page->index, 1, false);
                        /* Retry if a clean page was removed from the cache. */
-                       if (invalidate_inode_page(vmf->page))
-                               poisonret = 0;
-                       unlock_page(vmf->page);
+                       if (invalidate_inode_page(page))
+                               poisonret = VM_FAULT_NOPAGE;
+                       unlock_page(page);
                }
-               put_page(vmf->page);
+               put_page(page);
                vmf->page = NULL;
                return poisonret;
        }
index 3d60823..de175e2 100644 (file)
@@ -246,7 +246,7 @@ static bool remove_migration_pte(struct folio *folio,
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                }
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
 
                trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                           compound_order(new));
index 529fbc1..716caf8 100644 (file)
 
 #include "internal.h"
 
-static DEFINE_PER_CPU(struct pagevec, mlock_pvec);
+struct mlock_pvec {
+       local_lock_t lock;
+       struct pagevec vec;
+};
+
+static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
+       .lock = INIT_LOCAL_LOCK(lock),
+};
 
 bool can_do_mlock(void)
 {
@@ -203,18 +210,30 @@ static void mlock_pagevec(struct pagevec *pvec)
        pagevec_reinit(pvec);
 }
 
-void mlock_page_drain(int cpu)
+void mlock_page_drain_local(void)
+{
+       struct pagevec *pvec;
+
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
+       if (pagevec_count(pvec))
+               mlock_pagevec(pvec);
+       local_unlock(&mlock_pvec.lock);
+}
+
+void mlock_page_drain_remote(int cpu)
 {
        struct pagevec *pvec;
 
-       pvec = &per_cpu(mlock_pvec, cpu);
+       WARN_ON_ONCE(cpu_online(cpu));
+       pvec = &per_cpu(mlock_pvec.vec, cpu);
        if (pagevec_count(pvec))
                mlock_pagevec(pvec);
 }
 
 bool need_mlock_page_drain(int cpu)
 {
-       return pagevec_count(&per_cpu(mlock_pvec, cpu));
+       return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
 }
 
 /**
@@ -223,7 +242,10 @@ bool need_mlock_page_drain(int cpu)
  */
 void mlock_folio(struct folio *folio)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
+
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
 
        if (!folio_test_set_mlocked(folio)) {
                int nr_pages = folio_nr_pages(folio);
@@ -236,7 +258,7 @@ void mlock_folio(struct folio *folio)
        if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
            folio_test_large(folio) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -245,9 +267,11 @@ void mlock_folio(struct folio *folio)
  */
 void mlock_new_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
        int nr_pages = thp_nr_pages(page);
 
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
        SetPageMlocked(page);
        mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
        __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
@@ -256,7 +280,7 @@ void mlock_new_page(struct page *page)
        if (!pagevec_add(pvec, mlock_new(page)) ||
            PageHead(page) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -265,8 +289,10 @@ void mlock_new_page(struct page *page)
  */
 void munlock_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
 
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
        /*
         * TestClearPageMlocked(page) must be left to __munlock_page(),
         * which will check whether the page is multiply mlocked.
@@ -276,7 +302,7 @@ void munlock_page(struct page *page)
        if (!pagevec_add(pvec, page) ||
            PageHead(page) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
index bdc8f60..2db9578 100644 (file)
@@ -1108,6 +1108,9 @@ continue_merging:
 
                buddy_pfn = __find_buddy_pfn(pfn, order);
                buddy = page + (buddy_pfn - pfn);
+
+               if (!page_is_buddy(page, buddy, order))
+                       goto done_merging;
                buddy_mt = get_pageblock_migratetype(buddy);
 
                if (migratetype != buddy_mt
@@ -8364,6 +8367,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
        struct zone *zone;
 
        lru_add_drain_cpu(cpu);
+       mlock_page_drain_remote(cpu);
        drain_pages(cpu);
 
        /*
index 5cb970d..fedb823 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1683,7 +1683,7 @@ discard:
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
                folio_put(folio);
        }
 
@@ -1961,7 +1961,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
                folio_put(folio);
        }
 
index bceff0c..7e320ec 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -624,7 +624,6 @@ void lru_add_drain_cpu(int cpu)
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
        activate_page_drain(cpu);
-       mlock_page_drain(cpu);
 }
 
 /**
@@ -706,6 +705,7 @@ void lru_add_drain(void)
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
+       mlock_page_drain_local();
 }
 
 /*
@@ -720,6 +720,7 @@ static void lru_add_and_bh_lrus_drain(void)
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
        invalidate_bh_lrus_cpu();
+       mlock_page_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -728,6 +729,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&lru_pvecs.lock);
+       mlock_page_drain_local();
 }
 
 #ifdef CONFIG_SMP
index 992b6e5..363d47f 100644 (file)
@@ -991,10 +991,6 @@ static int ax25_release(struct socket *sock)
        sock_orphan(sk);
        ax25 = sk_to_ax25(sk);
        ax25_dev = ax25->ax25_dev;
-       if (ax25_dev) {
-               dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
-               ax25_dev_put(ax25_dev);
-       }
 
        if (sk->sk_type == SOCK_SEQPACKET) {
                switch (ax25->state) {
@@ -1056,6 +1052,15 @@ static int ax25_release(struct socket *sock)
                sk->sk_state_change(sk);
                ax25_destroy_socket(ax25);
        }
+       if (ax25_dev) {
+               del_timer_sync(&ax25->timer);
+               del_timer_sync(&ax25->t1timer);
+               del_timer_sync(&ax25->t2timer);
+               del_timer_sync(&ax25->t3timer);
+               del_timer_sync(&ax25->idletimer);
+               dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
+               ax25_dev_put(ax25_dev);
+       }
 
        sock->sk   = NULL;
        release_sock(sk);
index f6f8ba1..bafb0fb 100644 (file)
@@ -1050,7 +1050,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        int noblock = flags & MSG_DONTWAIT;
        int ret = 0;
 
-       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC))
+       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
                return -EINVAL;
 
        if (!so->bound)
index 780d9e2..7056cb1 100644 (file)
@@ -1539,8 +1539,8 @@ static int clone_execute(struct datapath *dp, struct sk_buff *skb,
                                pr_warn("%s: deferred action limit reached, drop sample action\n",
                                        ovs_dp_name(dp));
                        } else {  /* Recirc action */
-                               pr_warn("%s: deferred action limit reached, drop recirc action\n",
-                                       ovs_dp_name(dp));
+                               pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
+                                       ovs_dp_name(dp), recirc_id);
                        }
                }
        }
index 5176f6c..cc282a5 100644 (file)
@@ -2230,8 +2230,8 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
                        icmpv6_key->icmpv6_type = ntohs(output->tp.src);
                        icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
 
-                       if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
-                           icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+                       if (swkey->tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+                           swkey->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                                struct ovs_key_nd *nd_key;
 
                                nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
index 7bd6f8a..969e532 100644 (file)
@@ -777,14 +777,12 @@ void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
                       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
 
-static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
-                                          unsigned long expire_at,
-                                          unsigned long now,
-                                          enum rxrpc_timer_trace why)
-{
-       trace_rxrpc_timer(call, why, now);
-       timer_reduce(&call->timer, expire_at);
-}
+void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+                            unsigned long expire_at,
+                            unsigned long now,
+                            enum rxrpc_timer_trace why);
+
+void rxrpc_delete_call_timer(struct rxrpc_call *call);
 
 /*
  * call_object.c
@@ -808,6 +806,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
 bool __rxrpc_queue_call(struct rxrpc_call *);
 bool rxrpc_queue_call(struct rxrpc_call *);
 void rxrpc_see_call(struct rxrpc_call *);
+bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op);
 void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
 void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
 void rxrpc_cleanup_call(struct rxrpc_call *);
index df864e6..22e05de 100644 (file)
@@ -310,7 +310,7 @@ recheck_state:
        }
 
        if (call->state == RXRPC_CALL_COMPLETE) {
-               del_timer_sync(&call->timer);
+               rxrpc_delete_call_timer(call);
                goto out_put;
        }
 
index 4eb91d9..043508f 100644 (file)
@@ -53,10 +53,30 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
 
        if (call->state < RXRPC_CALL_COMPLETE) {
                trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
-               rxrpc_queue_call(call);
+               __rxrpc_queue_call(call);
+       } else {
+               rxrpc_put_call(call, rxrpc_call_put);
+       }
+}
+
+void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+                            unsigned long expire_at,
+                            unsigned long now,
+                            enum rxrpc_timer_trace why)
+{
+       if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) {
+               trace_rxrpc_timer(call, why, now);
+               if (timer_reduce(&call->timer, expire_at))
+                       rxrpc_put_call(call, rxrpc_call_put_notimer);
        }
 }
 
+void rxrpc_delete_call_timer(struct rxrpc_call *call)
+{
+       if (del_timer_sync(&call->timer))
+               rxrpc_put_call(call, rxrpc_call_put_timer);
+}
+
 static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
 
 /*
@@ -463,6 +483,17 @@ void rxrpc_see_call(struct rxrpc_call *call)
        }
 }
 
+bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
+{
+       const void *here = __builtin_return_address(0);
+       int n = atomic_fetch_add_unless(&call->usage, 1, 0);
+
+       if (n == 0)
+               return false;
+       trace_rxrpc_call(call->debug_id, op, n, here, NULL);
+       return true;
+}
+
 /*
  * Note the addition of a ref on a call.
  */
@@ -510,8 +541,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
        spin_unlock_bh(&call->lock);
 
        rxrpc_put_call_slot(call);
-
-       del_timer_sync(&call->timer);
+       rxrpc_delete_call_timer(call);
 
        /* Make sure we don't get any more notifications */
        write_lock_bh(&rx->recvmsg_lock);
@@ -618,6 +648,8 @@ static void rxrpc_destroy_call(struct work_struct *work)
        struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
        struct rxrpc_net *rxnet = call->rxnet;
 
+       rxrpc_delete_call_timer(call);
+
        rxrpc_put_connection(call->conn);
        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
@@ -652,8 +684,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 
        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
 
-       del_timer_sync(&call->timer);
-
        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
 
index ead3471..ee269e0 100644 (file)
@@ -84,6 +84,9 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
 
        prep->payload.data[1] = (struct rxrpc_security *)sec;
 
+       if (!sec->preparse_server_key)
+               return -EINVAL;
+
        return sec->preparse_server_key(prep);
 }
 
@@ -91,7 +94,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
 {
        const struct rxrpc_security *sec = prep->payload.data[1];
 
-       if (sec)
+       if (sec && sec->free_preparse_server_key)
                sec->free_preparse_server_key(prep);
 }
 
@@ -99,7 +102,7 @@ static void rxrpc_destroy_s(struct key *key)
 {
        const struct rxrpc_security *sec = key->payload.data[1];
 
-       if (sec)
+       if (sec && sec->destroy_server_key)
                sec->destroy_server_key(key);
 }
 
index b34fca6..af040ff 100644 (file)
@@ -591,9 +591,13 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
        u32 nb_entries1 = 0, nb_entries2;
 
        if (unlikely(pool->dma_need_sync)) {
+               struct xdp_buff *buff;
+
                /* Slow path */
-               *xdp = xp_alloc(pool);
-               return !!*xdp;
+               buff = xp_alloc(pool);
+               if (buff)
+                       *xdp = buff;
+               return !!buff;
        }
 
        if (unlikely(pool->free_list_cnt)) {
index 2173a67..9717e6f 100644 (file)
@@ -40,8 +40,7 @@ include $(srctree)/scripts/Makefile.compiler
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-kbuild-file := $(if $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Kbuild,$(kbuild-dir)/Makefile)
-include $(kbuild-file)
+include $(or $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Makefile)
 
 include $(srctree)/scripts/Makefile.lib
 
index fd61753..74cb1c5 100644 (file)
@@ -12,7 +12,7 @@ include $(srctree)/scripts/Kbuild.include
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+include $(or $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Makefile)
 
 # Figure out what we need to build from the various variables
 # ==========================================================================
index c593475..9f69ecd 100644 (file)
@@ -106,7 +106,7 @@ subdir-ym   := $(addprefix $(obj)/,$(subdir-ym))
 modname-multi = $(sort $(foreach m,$(multi-obj-ym),\
                $(if $(filter $*.o, $(call suffix-search, $m, .o, -objs -y -m)),$(m:.o=))))
 
-__modname = $(if $(modname-multi),$(modname-multi),$(basetarget))
+__modname = $(or $(modname-multi),$(basetarget))
 
 modname = $(subst $(space),:,$(__modname))
 modfile = $(addprefix $(obj)/,$(__modname))
@@ -241,20 +241,16 @@ $(foreach m, $(notdir $1), \
        $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
 endef
 
-quiet_cmd_copy = COPY    $@
-      cmd_copy = cp $< $@
-
-# Shipped files
+# Copy a file
 # ===========================================================================
 # 'cp' preserves permissions. If you use it to copy a file in read-only srctree,
 # the copy would be read-only as well, leading to an error when executing the
 # rule next time. Use 'cat' instead in order to generate a writable file.
-
-quiet_cmd_shipped = SHIPPED $@
-cmd_shipped = cat $< > $@
+quiet_cmd_copy = COPY    $@
+      cmd_copy = cat $< > $@
 
 $(obj)/%: $(src)/%_shipped
-       $(call cmd,shipped)
+       $(call cmd,copy)
 
 # Commands useful for building a boot image
 # ===========================================================================
@@ -431,7 +427,7 @@ MKIMAGE := $(srctree)/scripts/mkuboot.sh
 # SRCARCH just happens to match slightly more than ARCH (on sparc), so reduces
 # the number of overrides in arch makefiles
 UIMAGE_ARCH ?= $(SRCARCH)
-UIMAGE_COMPRESSION ?= $(if $(2),$(2),none)
+UIMAGE_COMPRESSION ?= $(or $(2),none)
 UIMAGE_OPTS-y ?=
 UIMAGE_TYPE ?= kernel
 UIMAGE_LOADADDR ?= arch_must_set_this
index 44e887c..2328f9a 100644 (file)
@@ -105,25 +105,6 @@ static void usage(void)
        exit(1);
 }
 
-/*
- * In the intended usage of this program, the stdout is redirected to .*.cmd
- * files. The return value of printf() must be checked to catch any error,
- * e.g. "No space left on device".
- */
-static void xprintf(const char *format, ...)
-{
-       va_list ap;
-       int ret;
-
-       va_start(ap, format);
-       ret = vprintf(format, ap);
-       if (ret < 0) {
-               perror("fixdep");
-               exit(1);
-       }
-       va_end(ap);
-}
-
 struct item {
        struct item     *next;
        unsigned int    len;
@@ -189,7 +170,7 @@ static void use_config(const char *m, int slen)
 
        define_config(m, slen, hash);
        /* Print out a dependency path from a symbol name. */
-       xprintf("    $(wildcard include/config/%.*s) \\\n", slen, m);
+       printf("    $(wildcard include/config/%.*s) \\\n", slen, m);
 }
 
 /* test if s ends in sub */
@@ -318,13 +299,13 @@ static void parse_dep_file(char *m, const char *target)
                                 */
                                if (!saw_any_target) {
                                        saw_any_target = 1;
-                                       xprintf("source_%s := %s\n\n",
-                                               target, m);
-                                       xprintf("deps_%s := \\\n", target);
+                                       printf("source_%s := %s\n\n",
+                                              target, m);
+                                       printf("deps_%s := \\\n", target);
                                }
                                is_first_dep = 0;
                        } else {
-                               xprintf("  %s \\\n", m);
+                               printf("  %s \\\n", m);
                        }
 
                        buf = read_file(m);
@@ -347,8 +328,8 @@ static void parse_dep_file(char *m, const char *target)
                exit(1);
        }
 
-       xprintf("\n%s: $(deps_%s)\n\n", target, target);
-       xprintf("$(deps_%s):\n", target);
+       printf("\n%s: $(deps_%s)\n\n", target, target);
+       printf("$(deps_%s):\n", target);
 }
 
 int main(int argc, char *argv[])
@@ -363,11 +344,22 @@ int main(int argc, char *argv[])
        target = argv[2];
        cmdline = argv[3];
 
-       xprintf("cmd_%s := %s\n\n", target, cmdline);
+       printf("cmd_%s := %s\n\n", target, cmdline);
 
        buf = read_file(depfile);
        parse_dep_file(buf, target);
        free(buf);
 
+       fflush(stdout);
+
+       /*
+        * In the intended usage, the stdout is redirected to .*.cmd files.
+        * Call ferror() to catch errors such as "No space left on device".
+        */
+       if (ferror(stdout)) {
+               fprintf(stderr, "fixdep: not all data was written to the output\n");
+               exit(1);
+       }
+
        return 0;
 }
index 7437e19..1389db7 100755 (executable)
@@ -327,7 +327,7 @@ sub output_rest {
                my @filepath = split / /, $data{$what}->{filepath};
 
                if ($enable_lineno) {
-                       printf "#define LINENO %s%s#%s\n\n",
+                       printf ".. LINENO %s%s#%s\n\n",
                               $prefix, $file[0],
                               $data{$what}->{line_no};
                }
@@ -1023,7 +1023,7 @@ logic (B<--no-rst-source>).
 
 =item B<--enable-lineno>
 
-Enable output of #define LINENO lines.
+Enable output of .. LINENO lines.
 
 =item B<--debug> I<debug level>
 
index 4577123..76cfb96 100755 (executable)
@@ -13,6 +13,7 @@ my $man;
 my $debug;
 my $arch;
 my $feat;
+my $enable_fname;
 
 my $basename = abs_path($0);
 $basename =~ s,/[^/]+$,/,;
@@ -31,6 +32,7 @@ GetOptions(
        'arch=s' => \$arch,
        'feat=s' => \$feat,
        'feature=s' => \$feat,
+       "enable-fname" => \$enable_fname,
        man => \$man
 ) or pod2usage(2);
 
@@ -95,6 +97,10 @@ sub parse_feat {
        return if ($file =~ m,($prefix)/arch-support.txt,);
        return if (!($file =~ m,arch-support.txt$,));
 
+       if ($enable_fname) {
+               printf ".. FILE %s\n", abs_path($file);
+       }
+
        my $subsys = "";
        $subsys = $2 if ( m,.*($prefix)/([^/]+).*,);
 
@@ -580,6 +586,11 @@ Output features for a single specific feature.
 Changes the location of the Feature files. By default, it uses
 the Documentation/features directory.
 
+=item B<--enable-fname>
+
+Prints the file name of the feature files. This can be used in order to
+track dependencies during documentation build.
+
 =item B<--debug>
 
 Put the script in verbose mode, useful for debugging. Can be called multiple
index 54ad86d..8caabdd 100644 (file)
@@ -108,7 +108,7 @@ static bool is_ignored_symbol(const char *name, char type)
        /* Symbol names that begin with the following are ignored.*/
        static const char * const ignored_prefixes[] = {
                "$",                    /* local symbols for ARM, MIPS, etc. */
-               ".LASANPC",             /* s390 kasan local symbols */
+               ".L",                   /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
                "__crc_",               /* modversions */
                "__efistub_",           /* arm64 EFI stub namespace */
                "__kvm_nvhe_",          /* arm64 non-VHE KVM namespace */
index d3c3a61..901835a 100644 (file)
@@ -903,19 +903,20 @@ next:
                        menu = menu->list;
                        continue;
                }
-               if (menu->next)
+
+end_check:
+               if (!menu->sym && menu_is_visible(menu) && menu != &rootmenu &&
+                   menu->prompt->type == P_MENU) {
+                       fprintf(out, "# end of %s\n", menu_get_prompt(menu));
+                       need_newline = true;
+               }
+
+               if (menu->next) {
                        menu = menu->next;
-               else while ((menu = menu->parent)) {
-                       if (!menu->sym && menu_is_visible(menu) &&
-                           menu != &rootmenu) {
-                               str = menu_get_prompt(menu);
-                               fprintf(out, "# end of %s\n", str);
-                               need_newline = true;
-                       }
-                       if (menu->next) {
-                               menu = menu->next;
-                               break;
-                       }
+               } else {
+                       menu = menu->parent;
+                       if (menu)
+                               goto end_check;
                }
        }
        fclose(out);
@@ -979,6 +980,7 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
 
        fprintf(out, "\n$(deps_config): ;\n");
 
+       fflush(out);
        ret = ferror(out); /* error check for all fprintf() calls */
        fclose(out);
        if (ret)
@@ -1097,6 +1099,7 @@ static int __conf_write_autoconf(const char *filename,
                if ((sym->flags & SYMBOL_WRITE) && sym->name)
                        print_symbol(file, sym);
 
+       fflush(file);
        /* check possible errors in conf_write_heading() and print_symbol() */
        ret = ferror(file);
        fclose(file);
index 9c084a2..7516949 100755 (executable)
@@ -424,7 +424,7 @@ sub get_kernel_version() {
 sub print_lineno {
     my $lineno = shift;
     if ($enable_lineno && defined($lineno)) {
-        print "#define LINENO " . $lineno . "\n";
+        print ".. LINENO " . $lineno . "\n";
     }
 }
 ##
@@ -2478,7 +2478,7 @@ May be specified multiple times.
 
 =item -enable-lineno
 
-Enable output of #define LINENO lines.
+Enable output of .. LINENO lines.
 
 =back
 
index 1d2d71c..9b2c492 100644 (file)
@@ -166,7 +166,7 @@ config HARDENED_USERCOPY
 config HARDENED_USERCOPY_PAGESPAN
        bool "Refuse to copy allocations that span multiple pages"
        depends on HARDENED_USERCOPY
-       depends on EXPERT
+       depends on BROKEN
        help
          When a multi-page allocation is done without __GFP_COMP,
          hardened usercopy will reject attempts to copy it. There are,
index edd9849..977d543 100644 (file)
@@ -970,6 +970,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
 
        runtime->status->state = SNDRV_PCM_STATE_OPEN;
        mutex_init(&runtime->buffer_mutex);
+       atomic_set(&runtime->buffer_accessing, 0);
 
        substream->runtime = runtime;
        substream->private_data = pcm->private_data;
index a40a35e..1fc7c50 100644 (file)
@@ -1906,11 +1906,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                if (avail >= runtime->twake)
                        break;
                snd_pcm_stream_unlock_irq(substream);
-               mutex_unlock(&runtime->buffer_mutex);
 
                tout = schedule_timeout(wait_time);
 
-               mutex_lock(&runtime->buffer_mutex);
                snd_pcm_stream_lock_irq(substream);
                set_current_state(TASK_INTERRUPTIBLE);
                switch (runtime->status->state) {
@@ -2221,7 +2219,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
 
        nonblock = !!(substream->f_flags & O_NONBLOCK);
 
-       mutex_lock(&runtime->buffer_mutex);
        snd_pcm_stream_lock_irq(substream);
        err = pcm_accessible_state(runtime);
        if (err < 0)
@@ -2276,6 +2273,10 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                        err = -EINVAL;
                        goto _end_unlock;
                }
+               if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
+                       err = -EBUSY;
+                       goto _end_unlock;
+               }
                snd_pcm_stream_unlock_irq(substream);
                if (!is_playback)
                        snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
@@ -2284,6 +2285,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                if (is_playback)
                        snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
                snd_pcm_stream_lock_irq(substream);
+               atomic_dec(&runtime->buffer_accessing);
                if (err < 0)
                        goto _end_unlock;
                err = pcm_accessible_state(runtime);
@@ -2313,7 +2315,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
        if (xfer > 0 && err >= 0)
                snd_pcm_update_state(substream, runtime);
        snd_pcm_stream_unlock_irq(substream);
-       mutex_unlock(&runtime->buffer_mutex);
        return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
 }
 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
index 704fdc9..4adaee6 100644 (file)
@@ -685,6 +685,24 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
        return 0;
 }
 
+/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
+ * block the further r/w operations
+ */
+static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
+{
+       if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
+               return -EBUSY;
+       mutex_lock(&runtime->buffer_mutex);
+       return 0; /* keep buffer_mutex, unlocked by below */
+}
+
+/* release buffer_mutex and clear r/w access flag */
+static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
+{
+       mutex_unlock(&runtime->buffer_mutex);
+       atomic_inc(&runtime->buffer_accessing);
+}
+
 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
 #define is_oss_stream(substream)       ((substream)->oss.oss)
 #else
@@ -695,14 +713,16 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *params)
 {
        struct snd_pcm_runtime *runtime;
-       int err = 0, usecs;
+       int err, usecs;
        unsigned int bits;
        snd_pcm_uframes_t frames;
 
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
        runtime = substream->runtime;
-       mutex_lock(&runtime->buffer_mutex);
+       err = snd_pcm_buffer_access_lock(runtime);
+       if (err < 0)
+               return err;
        snd_pcm_stream_lock_irq(substream);
        switch (runtime->status->state) {
        case SNDRV_PCM_STATE_OPEN:
@@ -820,7 +840,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
                        snd_pcm_lib_free_pages(substream);
        }
  unlock:
-       mutex_unlock(&runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(runtime);
        return err;
 }
 
@@ -865,7 +885,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
        runtime = substream->runtime;
-       mutex_lock(&runtime->buffer_mutex);
+       result = snd_pcm_buffer_access_lock(runtime);
+       if (result < 0)
+               return result;
        snd_pcm_stream_lock_irq(substream);
        switch (runtime->status->state) {
        case SNDRV_PCM_STATE_SETUP:
@@ -884,7 +906,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
        cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
  unlock:
-       mutex_unlock(&runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(runtime);
        return result;
 }
 
@@ -1369,12 +1391,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 
        /* Guarantee the group members won't change during non-atomic action */
        down_read(&snd_pcm_link_rwsem);
-       mutex_lock(&substream->runtime->buffer_mutex);
+       res = snd_pcm_buffer_access_lock(substream->runtime);
+       if (res < 0)
+               goto unlock;
        if (snd_pcm_stream_linked(substream))
                res = snd_pcm_action_group(ops, substream, state, false);
        else
                res = snd_pcm_action_single(ops, substream, state);
-       mutex_unlock(&substream->runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(substream->runtime);
+ unlock:
        up_read(&snd_pcm_link_rwsem);
        return res;
 }
index b6bdebd..10112e1 100644 (file)
@@ -494,7 +494,7 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
        static int dev;
        int err;
        struct snd_card *card;
-       struct pnp_dev *cdev;
+       struct pnp_dev *cdev, *iter;
        char cid[PNP_ID_LEN];
 
        if (pnp_device_is_isapnp(pdev))
@@ -510,9 +510,11 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
        strcpy(cid, pdev->id[0].id);
        cid[5] = '1';
        cdev = NULL;
-       list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) {
-               if (!strcmp(cdev->id[0].id, cid))
+       list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) {
+               if (!strcmp(iter->id[0].id, cid)) {
+                       cdev = iter;
                        break;
+               }
        }
        err = snd_cs423x_card_new(&pdev->dev, dev, &card);
        if (err < 0)
index 2d1fa70..74c50ec 100644 (file)
@@ -478,28 +478,29 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0A29, "Bullseye", CS8409_BULLSEYE),
        SND_PCI_QUIRK(0x1028, 0x0A2A, "Bullseye", CS8409_BULLSEYE),
        SND_PCI_QUIRK(0x1028, 0x0A2B, "Bullseye", CS8409_BULLSEYE),
+       SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AB0, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB2, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB1, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB3, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB4, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB5, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
        SND_PCI_QUIRK(0x1028, 0x0AD9, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADA, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADB, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADC, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0ADF, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AE0, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AE1, "Cyborg", CS8409_CYBORG),
@@ -512,11 +513,30 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0AEE, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AEF, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AF0, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0B92, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B93, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0B94, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B95, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0B96, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B97, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BB2, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB3, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB4, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0BB8, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB9, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BBA, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BBB, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BBC, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BBD, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BD4, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD5, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD6, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD7, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD8, "Dolphin", CS8409_DOLPHIN),
        {} /* terminator */
 };
 
@@ -524,6 +544,8 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
 const struct hda_model_fixup cs8409_models[] = {
        { .id = CS8409_BULLSEYE, .name = "bullseye" },
        { .id = CS8409_WARLOCK, .name = "warlock" },
+       { .id = CS8409_WARLOCK_MLK, .name = "warlock mlk" },
+       { .id = CS8409_WARLOCK_MLK_DUAL_MIC, .name = "warlock mlk dual mic" },
        { .id = CS8409_CYBORG, .name = "cyborg" },
        { .id = CS8409_DOLPHIN, .name = "dolphin" },
        {}
@@ -542,6 +564,18 @@ const struct hda_fixup cs8409_fixups[] = {
                .chained = true,
                .chain_id = CS8409_FIXUPS,
        },
+       [CS8409_WARLOCK_MLK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = cs8409_cs42l42_pincfgs,
+               .chained = true,
+               .chain_id = CS8409_FIXUPS,
+       },
+       [CS8409_WARLOCK_MLK_DUAL_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = cs8409_cs42l42_pincfgs,
+               .chained = true,
+               .chain_id = CS8409_FIXUPS,
+       },
        [CS8409_CYBORG] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = cs8409_cs42l42_pincfgs,
index aff2b5a..343fabc 100644 (file)
@@ -733,6 +733,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
                { 0x130A, 0x00 },
                { 0x130F, 0x00 },
        };
+       int fsv_old, fsv_new;
 
        /* Bring CS42L42 out of Reset */
        gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0);
@@ -749,8 +750,13 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
        /* Clear interrupts, by reading interrupt status registers */
        cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
 
-       if (cs42l42->full_scale_vol)
-               cs8409_i2c_write(cs42l42, 0x2001, 0x01);
+       fsv_old = cs8409_i2c_read(cs42l42, 0x2001);
+       if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB)
+               fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK;
+       else
+               fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK;
+       if (fsv_new != fsv_old)
+               cs8409_i2c_write(cs42l42, 0x2001, fsv_new);
 
        /* we have to explicitly allow unsol event handling even during the
         * resume phase so that the jack event is processed properly
@@ -906,9 +912,15 @@ static void cs8409_cs42l42_hw_init(struct hda_codec *codec)
                        cs8409_vendor_coef_set(codec, seq_bullseye->cir, seq_bullseye->coeff);
        }
 
-       /* DMIC1_MO=00b, DMIC1/2_SR=1 */
-       if (codec->fixup_id == CS8409_WARLOCK || codec->fixup_id == CS8409_CYBORG)
-               cs8409_vendor_coef_set(codec, 0x09, 0x0003);
+       switch (codec->fixup_id) {
+       case CS8409_CYBORG:
+       case CS8409_WARLOCK_MLK_DUAL_MIC:
+               /* DMIC1_MO=00b, DMIC1/2_SR=1 */
+               cs8409_vendor_coef_set(codec, CS8409_DMIC_CFG, 0x0003);
+               break;
+       default:
+               break;
+       }
 
        cs42l42_resume(cs42l42);
 
@@ -993,25 +1005,17 @@ void cs8409_cs42l42_fixups(struct hda_codec *codec, const struct hda_fixup *fix,
                cs8409_fix_caps(codec, CS8409_CS42L42_HP_PIN_NID);
                cs8409_fix_caps(codec, CS8409_CS42L42_AMIC_PIN_NID);
 
-               /* Set TIP_SENSE_EN for analog front-end of tip sense.
-                * Additionally set HSBIAS_SENSE_EN and Full Scale volume for some variants.
-                */
+               /* Set HSBIAS_SENSE_EN and Full Scale volume for some variants. */
                switch (codec->fixup_id) {
-               case CS8409_WARLOCK:
+               case CS8409_WARLOCK_MLK:
+               case CS8409_WARLOCK_MLK_DUAL_MIC:
                        spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
-                       break;
-               case CS8409_BULLSEYE:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 0;
-                       break;
-               case CS8409_CYBORG:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x00a0;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_0DB;
                        break;
                default:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0003;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
+                       spec->scodecs[CS8409_CODEC0]->full_scale_vol =
+                               CS42L42_FULL_SCALE_VOL_MINUS6DB;
                        break;
                }
 
@@ -1222,6 +1226,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
                cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
                cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
 
+               spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+               spec->scodecs[CS8409_CODEC1]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+
                break;
        case HDA_FIXUP_ACT_PROBE:
                /* Fix Sample Rate to 48kHz */
index d0b725c..7df46bd 100644 (file)
@@ -235,6 +235,9 @@ enum cs8409_coefficient_index_registers {
 #define CS42L42_I2C_SLEEP_US                   (2000)
 #define CS42L42_PDN_TIMEOUT_US                 (250000)
 #define CS42L42_PDN_SLEEP_US                   (2000)
+#define CS42L42_FULL_SCALE_VOL_MASK            (2)
+#define CS42L42_FULL_SCALE_VOL_0DB             (1)
+#define CS42L42_FULL_SCALE_VOL_MINUS6DB                (0)
 
 /* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */
 
@@ -264,6 +267,8 @@ enum cs8409_coefficient_index_registers {
 enum {
        CS8409_BULLSEYE,
        CS8409_WARLOCK,
+       CS8409_WARLOCK_MLK,
+       CS8409_WARLOCK_MLK_DUAL_MIC,
        CS8409_CYBORG,
        CS8409_FIXUPS,
        CS8409_DOLPHIN,
index c85ed7b..3e086ee 100644 (file)
@@ -1625,6 +1625,7 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        struct hda_codec *codec = per_pin->codec;
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_eld *eld = &spec->temp_eld;
+       struct device *dev = hda_codec_dev(codec);
        hda_nid_t pin_nid = per_pin->pin_nid;
        int dev_id = per_pin->dev_id;
        /*
@@ -1638,8 +1639,13 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        int present;
        int ret;
 
+#ifdef CONFIG_PM
+       if (dev->power.runtime_status == RPM_SUSPENDING)
+               return;
+#endif
+
        ret = snd_hda_power_up_pm(codec);
-       if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec)))
+       if (ret < 0 && pm_runtime_suspended(dev))
                goto out;
 
        present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
index c78f169..4e12af2 100644 (file)
@@ -3617,8 +3617,8 @@ static void alc256_shutup(struct hda_codec *codec)
        /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
         * when booting with headset plugged. So skip setting it for the codec alc257
         */
-       if (spec->codec_variant != ALC269_TYPE_ALC257 &&
-           spec->codec_variant != ALC269_TYPE_ALC256)
+       if (codec->core.vendor_id != 0x10ec0236 &&
+           codec->core.vendor_id != 0x10ec0257)
                alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
        if (!spec->no_shutup_pins)
@@ -7006,6 +7006,7 @@ enum {
        ALC287_FIXUP_LEGION_16ACHG6,
        ALC287_FIXUP_CS35L41_I2C_2,
        ALC245_FIXUP_CS35L41_SPI_2,
+       ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
        ALC245_FIXUP_CS35L41_SPI_4,
        ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED,
        ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
@@ -8771,6 +8772,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_two,
        },
+       [ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs35l41_fixup_spi_two,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_HP_GPIO_LED,
+       },
        [ALC245_FIXUP_CS35L41_SPI_4] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_four,
@@ -9026,7 +9033,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89ac, "HP EliteBook 640 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ae, "HP EliteBook 650 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -11140,6 +11147,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
index 9b263a9..4c7b5d9 100644 (file)
@@ -107,6 +107,7 @@ int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
        priv->mtkaif_protocol = mtkaif_protocol;
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_protocol);
 
 static void playback_gpio_set(struct mt6358_priv *priv)
 {
@@ -273,6 +274,7 @@ int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt)
                           1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_enable);
 
 int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
 {
@@ -296,6 +298,7 @@ int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
        capture_gpio_reset(priv);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_disable);
 
 int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
                                        int phase_1, int phase_2)
@@ -310,6 +313,7 @@ int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
                           phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_calibration_phase);
 
 /* dl pga gain */
 enum {
index 370bc79..d9a0d47 100644 (file)
@@ -462,11 +462,9 @@ static int hp_jack_event(struct notifier_block *nb, unsigned long event,
 
        if (event & SND_JACK_HEADPHONE)
                /* Disable speaker if headphone is plugged in */
-               snd_soc_dapm_disable_pin(dapm, "Ext Spk");
+               return snd_soc_dapm_disable_pin(dapm, "Ext Spk");
        else
-               snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-       return 0;
+               return snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 }
 
 static struct notifier_block hp_jack_nb = {
@@ -481,11 +479,9 @@ static int mic_jack_event(struct notifier_block *nb, unsigned long event,
 
        if (event & SND_JACK_MICROPHONE)
                /* Disable dmic if microphone is plugged in */
-               snd_soc_dapm_disable_pin(dapm, "DMIC");
+               return snd_soc_dapm_disable_pin(dapm, "DMIC");
        else
-               snd_soc_dapm_enable_pin(dapm, "DMIC");
-
-       return 0;
+               return snd_soc_dapm_enable_pin(dapm, "DMIC");
 }
 
 static struct notifier_block mic_jack_nb = {
index d3b7104..98700e7 100644 (file)
@@ -469,14 +469,14 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
                txcr_val = I2S_TXCR_IBM_NORMAL;
                rxcr_val = I2S_RXCR_IBM_NORMAL;
                break;
-       case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
-               txcr_val = I2S_TXCR_TFS_PCM;
-               rxcr_val = I2S_RXCR_TFS_PCM;
-               break;
-       case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
+       case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 mode */
                txcr_val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
                rxcr_val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
                break;
+       case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
+               txcr_val = I2S_TXCR_TFS_PCM;
+               rxcr_val = I2S_RXCR_TFS_PCM;
+               break;
        default:
                ret = -EINVAL;
                goto err_pm_put;
index b53f216..1724193 100644 (file)
@@ -84,6 +84,7 @@ if SND_SOC_SOF_PCI
 config SND_SOC_SOF_MERRIFIELD
        tristate "SOF support for Tangier/Merrifield"
        default SND_SOC_SOF_PCI
+       select SND_SOC_SOF_PCI_DEV
        select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
        help
          This adds support for Sound Open Firmware for Intel(R) platforms
index 9800f96..c6d2c77 100644 (file)
@@ -74,7 +74,7 @@ CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
 CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
 CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
-       -I$(if $(OUTPUT),$(OUTPUT),.) \
+       -I$(or $(OUTPUT),.) \
        -I$(LIBBPF_INCLUDE) \
        -I$(srctree)/kernel/bpf/ \
        -I$(srctree)/tools/include \
@@ -180,7 +180,7 @@ endif
 
 $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
        $(QUIET_CLANG)$(CLANG) \
-               -I$(if $(OUTPUT),$(OUTPUT),.) \
+               -I$(or $(OUTPUT),.) \
                -I$(srctree)/tools/include/uapi/ \
                -I$(LIBBPF_BOOTSTRAP_INCLUDE) \
                -g -O2 -Wall -target bpf -c $< -o $@
index c2f43a5..290998c 100644 (file)
@@ -207,7 +207,10 @@ static void probe_unprivileged_disabled(void)
                        printf("bpf() syscall for unprivileged users is enabled\n");
                        break;
                case 1:
-                       printf("bpf() syscall restricted to privileged users\n");
+                       printf("bpf() syscall restricted to privileged users (without recovery)\n");
+                       break;
+               case 2:
+                       printf("bpf() syscall restricted to privileged users (admin can change)\n");
                        break;
                case -1:
                        printf("Unable to retrieve required privileges for bpf() syscall\n");
index 7ba7ff5..91af285 100644 (file)
@@ -477,7 +477,7 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
        codegen("\
                \n\
                __attribute__((unused)) static void                         \n\
-               %1$s__assert(struct %1$s *s)                                \n\
+               %1$s__assert(struct %1$s *s __attribute__((unused)))        \n\
                {                                                           \n\
                #ifdef __cplusplus                                          \n\
                #define _Static_assert static_assert                        \n\
index 6f11e6f..17cdf01 100644 (file)
@@ -36,7 +36,7 @@ TMP_O := $(if $(OUTPUT),$(OUTPUT)feature/,./)
 
 clean:
        $(call QUIET_CLEAN, fixdep)
-       $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
        $(Q)rm -f $(OUTPUT)fixdep
        $(call QUIET_CLEAN, feature-detect)
 ifneq ($(wildcard $(TMP_O)),)
index 5ebc195..8843f0f 100644 (file)
@@ -40,7 +40,7 @@ $(OUTPUT)counter_example: $(COUNTER_EXAMPLE)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/counter.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 4404340..d29c9c4 100644 (file)
@@ -78,7 +78,7 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -f $(OUTPUT)include/linux/gpio.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index b57143d..fe770e6 100644 (file)
@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
 
 clean:
        rm -f $(ALL_PROGRAMS)
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(sbindir); \
index 5d12ac4..fa720f0 100644 (file)
@@ -58,7 +58,7 @@ $(OUTPUT)iio_generic_buffer: $(IIO_GENERIC_BUFFER_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/iio
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 7604e7d..d14b10b 100644 (file)
@@ -3009,8 +3009,8 @@ union bpf_attr {
  *
  *                     # sysctl kernel.perf_event_max_stack=<new value>
  *     Return
- *             A non-negative value equal to or less than *size* on success,
- *             or a negative error in case of failure.
+ *             The non-negative copied *buf* length equal to or less than
+ *             *size* on success, or a negative error in case of failure.
  *
  * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
  *     Description
@@ -4316,8 +4316,8 @@ union bpf_attr {
  *
  *                     # sysctl kernel.perf_event_max_stack=<new value>
  *     Return
- *             A non-negative value equal to or less than *size* on success,
- *             or a negative error in case of failure.
+ *             The non-negative copied *buf* length equal to or less than
+ *             *size* on success, or a negative error in case of failure.
  *
  * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
  *     Description
index a13e9c7..e21e1b4 100644 (file)
@@ -60,7 +60,7 @@ $(LIBFILE): $(API_IN)
 
 clean:
        $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
-       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+       find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 FORCE:
 
index b8b37fe..064c89e 100644 (file)
@@ -60,7 +60,7 @@ ifndef VERBOSE
   VERBOSE = 0
 endif
 
-INCLUDES = -I$(if $(OUTPUT),$(OUTPUT),.)                               \
+INCLUDES = -I$(or $(OUTPUT),.) \
           -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
 
 export prefix libdir src obj
index 08fe6e3..21df023 100644 (file)
@@ -153,7 +153,7 @@ $(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI)
        $(QUIET_LINK)$(CC) -o $@ $^
 
 $(TESTS_SHARED): $(TESTS_IN) $(LIBAPI)
-       $(QUIET_LINK)$(CC) -o $@ -L$(if $(OUTPUT),$(OUTPUT),.) $^ -lperf
+       $(QUIET_LINK)$(CC) -o $@ -L$(or $(OUTPUT),.) $^ -lperf
 
 make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC)
 
index 1c777a7..8f1a09c 100644 (file)
@@ -63,7 +63,7 @@ $(LIBFILE): $(SUBCMD_IN)
 
 clean:
        $(call QUIET_CLEAN, libsubcmd) $(RM) $(LIBFILE); \
-       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+       find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 FORCE:
 
index 92ce4fc..0dbd397 100644 (file)
@@ -13,7 +13,7 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
 SUBCMD_SRCDIR          = $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT       = $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
+LIBSUBCMD_OUTPUT       = $(or $(OUTPUT),$(CURDIR)/)
 LIBSUBCMD              = $(LIBSUBCMD_OUTPUT)libsubcmd.a
 
 OBJTOOL    := $(OUTPUT)objtool
index 4b95a51..5774477 100644 (file)
@@ -42,7 +42,7 @@ $(OUTPUT)pcitest: $(PCITEST_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 9c935f8..0520c86 100644 (file)
@@ -724,7 +724,7 @@ endif
 # get relative building directory (to $(OUTPUT))
 # and '.' if it's $(OUTPUT) itself
 __build-dir = $(subst $(OUTPUT),,$(dir $@))
-build-dir   = $(if $(__build-dir),$(__build-dir),.)
+build-dir   = $(or $(__build-dir),.)
 
 prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \
        $(fadvise_advice_array) \
@@ -1090,7 +1090,7 @@ bpf-skel-clean:
 
 clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean
        $(call QUIET_CLEAN, core-objs)  $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf-iostat $(LANG_BINDINGS)
-       $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
        $(Q)$(RM) $(OUTPUT).config-detected
        $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
        $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
index d2fba12..846f785 100644 (file)
@@ -47,7 +47,7 @@ $(OUTPUT)intel-speed-select: $(ISST_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/isst_if.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index a2335e4..0efb8f2 100644 (file)
@@ -52,11 +52,17 @@ define allow-override
 endef
 
 ifneq ($(LLVM),)
-$(call allow-override,CC,clang)
-$(call allow-override,AR,llvm-ar)
-$(call allow-override,LD,ld.lld)
-$(call allow-override,CXX,clang++)
-$(call allow-override,STRIP,llvm-strip)
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+$(call allow-override,CC,$(LLVM_PREFIX)clang$(LLVM_SUFFIX))
+$(call allow-override,AR,$(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX))
+$(call allow-override,LD,$(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX))
+$(call allow-override,CXX,$(LLVM_PREFIX)clang++$(LLVM_SUFFIX))
+$(call allow-override,STRIP,$(LLVM_PREFIX)llvm-strip$(LLVM_SUFFIX))
 else
 # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
@@ -69,9 +75,9 @@ endif
 CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
 ifneq ($(LLVM),)
-HOSTAR  ?= llvm-ar
-HOSTCC  ?= clang
-HOSTLD  ?= ld.lld
+HOSTAR  ?= $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
+HOSTCC  ?= $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+HOSTLD  ?= $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
 else
 HOSTAR  ?= ar
 HOSTCC  ?= gcc
index c16ce83..172e472 100644 (file)
@@ -175,5 +175,5 @@ _ge-abspath = $(if $(is-executable),$(1))
 define get-executable-or-default
 $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
 endef
-_ge_attempt = $(if $(get-executable),$(get-executable),$(call _gea_err,$(2)))
+_ge_attempt = $(or $(get-executable),$(call _gea_err,$(2)))
 _gea_err  = $(if $(1),$(error Please set '$(1)' appropriately))
index 0aa6dbd..7fccd24 100644 (file)
@@ -53,9 +53,9 @@ $(OUTPUT)spidev_fdx: $(SPIDEV_FDX_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '\.*.o.d' -delete
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '\.*.o.cmd' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete
+       find $(or $(OUTPUT),.) -name '\.*.o.d' -delete
+       find $(or $(OUTPUT),.) -name '\.*.o.cmd' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index c57d9e9..5eb5c23 100644 (file)
@@ -27,7 +27,6 @@ ccflags-y += -I$(srctree)/drivers/acpi/nfit/
 obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
 obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
 obj-$(CONFIG_ND_BTT) += nd_btt.o
-obj-$(CONFIG_ND_BLK) += nd_blk.o
 obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
 obj-$(CONFIG_ACPI_NFIT) += nfit.o
 ifeq ($(CONFIG_DAX),m)
@@ -50,9 +49,6 @@ nd_pmem-y += config_check.o
 nd_btt-y := $(NVDIMM_SRC)/btt.o
 nd_btt-y += config_check.o
 
-nd_blk-y := $(NVDIMM_SRC)/blk.o
-nd_blk-y += config_check.o
-
 nd_e820-y := $(NVDIMM_SRC)/e820.o
 nd_e820-y += config_check.o
 
index 3e3a5f5..baed75e 100644 (file)
@@ -11,7 +11,6 @@ void check(void)
        BUILD_BUG_ON(!IS_MODULE(CONFIG_BLK_DEV_PMEM));
        BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT));
        BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_PFN));
-       BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK));
        if (IS_ENABLED(CONFIG_ACPI_NFIT))
                BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
        BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX));
index 3ca7c32..4d1a947 100644 (file)
@@ -338,62 +338,6 @@ static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
        return 0;
 }
 
-static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
-               void *iobuf, u64 len, int rw)
-{
-       struct ndtest_dimm *dimm = ndbr->blk_provider_data;
-       struct ndtest_blk_mmio *mmio = dimm->mmio;
-       struct nd_region *nd_region = &ndbr->nd_region;
-       unsigned int lane;
-
-       if (!mmio)
-               return -ENOMEM;
-
-       lane = nd_region_acquire_lane(nd_region);
-       if (rw)
-               memcpy(mmio->base + dpa, iobuf, len);
-       else {
-               memcpy(iobuf, mmio->base + dpa, len);
-               arch_invalidate_pmem(mmio->base + dpa, len);
-       }
-
-       nd_region_release_lane(nd_region, lane);
-
-       return 0;
-}
-
-static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
-                                   struct device *dev)
-{
-       struct nd_blk_region *ndbr = to_nd_blk_region(dev);
-       struct nvdimm *nvdimm;
-       struct ndtest_dimm *dimm;
-       struct ndtest_blk_mmio *mmio;
-
-       nvdimm = nd_blk_region_to_dimm(ndbr);
-       dimm = nvdimm_provider_data(nvdimm);
-
-       nd_blk_region_set_provider_data(ndbr, dimm);
-       dimm->blk_region = to_nd_region(dev);
-
-       mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL);
-       if (!mmio)
-               return -ENOMEM;
-
-       mmio->base = (void __iomem *) devm_nvdimm_memremap(
-               dev, dimm->address, 12, nd_blk_memremap_flags(ndbr));
-       if (!mmio->base) {
-               dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm));
-               return -ENOMEM;
-       }
-       mmio->size = dimm->size;
-       mmio->base_offset = 0;
-
-       dimm->mmio = mmio;
-
-       return 0;
-}
-
 static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
 {
        int i;
@@ -523,17 +467,16 @@ static int ndtest_create_region(struct ndtest_priv *p,
                                struct ndtest_region *region)
 {
        struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
-       struct nd_blk_region_desc ndbr_desc;
+       struct nd_region_desc *ndr_desc, _ndr_desc;
        struct nd_interleave_set *nd_set;
-       struct nd_region_desc *ndr_desc;
        struct resource res;
        int i, ndimm = region->mapping[0].dimm;
        u64 uuid[2];
 
        memset(&res, 0, sizeof(res));
        memset(&mappings, 0, sizeof(mappings));
-       memset(&ndbr_desc, 0, sizeof(ndbr_desc));
-       ndr_desc = &ndbr_desc.ndr_desc;
+       memset(&_ndr_desc, 0, sizeof(_ndr_desc));
+       ndr_desc = &_ndr_desc;
 
        if (!ndtest_alloc_resource(p, region->size, &res.start))
                return -ENOMEM;
@@ -857,10 +800,8 @@ static int ndtest_dimm_register(struct ndtest_priv *priv,
        struct device *dev = &priv->pdev.dev;
        unsigned long dimm_flags = dimm->flags;
 
-       if (dimm->num_formats > 1) {
-               set_bit(NDD_ALIASING, &dimm_flags);
+       if (dimm->num_formats > 1)
                set_bit(NDD_LABELING, &dimm_flags);
-       }
 
        if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
                set_bit(NDD_UNARMED, &dimm_flags);
index 0bc91ff..65dbdda 100644 (file)
@@ -2842,28 +2842,6 @@ static void nfit_test1_setup(struct nfit_test *t)
        set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
 }
 
-static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
-               void *iobuf, u64 len, int rw)
-{
-       struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
-       struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
-       struct nd_region *nd_region = &ndbr->nd_region;
-       unsigned int lane;
-
-       lane = nd_region_acquire_lane(nd_region);
-       if (rw)
-               memcpy(mmio->addr.base + dpa, iobuf, len);
-       else {
-               memcpy(iobuf, mmio->addr.base + dpa, len);
-
-               /* give us some some coverage of the arch_invalidate_pmem() API */
-               arch_invalidate_pmem(mmio->addr.base + dpa, len);
-       }
-       nd_region_release_lane(nd_region, lane);
-
-       return 0;
-}
-
 static unsigned long nfit_ctl_handle;
 
 union acpi_object *result;
@@ -3219,7 +3197,6 @@ static int nfit_test_probe(struct platform_device *pdev)
        nfit_test->setup(nfit_test);
        acpi_desc = &nfit_test->acpi_desc;
        acpi_nfit_desc_init(acpi_desc, &pdev->dev);
-       acpi_desc->blk_do_io = nfit_test_blk_do_io;
        nd_desc = &acpi_desc->nd_desc;
        nd_desc->provider_name = NULL;
        nd_desc->module = THIS_MODULE;
index e834a01..1604897 100644 (file)
@@ -29,11 +29,8 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
         */
        struct get_stack_trace_t e;
        int i, num_stack;
-       static __u64 cnt;
        struct ksym *ks;
 
-       cnt++;
-
        memset(&e, 0, sizeof(e));
        memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e));
 
index 36a707e..6c62bfb 100644 (file)
@@ -39,16 +39,8 @@ struct {
        __type(value, stack_trace_t);
 } stack_amap SEC(".maps");
 
-/* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */
-struct random_urandom_args {
-       unsigned long long pad;
-       int got_bits;
-       int pool_left;
-       int input_left;
-};
-
-SEC("tracepoint/random/urandom_read")
-int oncpu(struct random_urandom_args *args)
+SEC("kprobe/urandom_read")
+int oncpu(struct pt_regs *args)
 {
        __u32 max_len = sizeof(struct bpf_stack_build_id)
                        * PERF_MAX_STACK_DEPTH;
index baa3e3e..aa29461 100644 (file)
@@ -209,7 +209,8 @@ static void test_lpm_order(void)
 static void test_lpm_map(int keysize)
 {
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
-       size_t i, j, n_matches, n_matches_after_delete, n_nodes, n_lookups;
+       volatile size_t n_matches, n_matches_after_delete;
+       size_t i, j, n_nodes, n_lookups;
        struct tlpm_node *t, *list = NULL;
        struct bpf_lpm_trie_key *key;
        uint8_t *data, *value;
index a40add3..2a2d240 100644 (file)
@@ -1,7 +1,13 @@
 # This mimics the top-level Makefile. We do it explicitly here so that this
 # Makefile can operate with or without the kbuild infrastructure.
 ifneq ($(LLVM),)
-CC := clang
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
 else
 CC := $(CROSS_COMPILE)gcc
 endif
index c969812..0b45055 100644 (file)
@@ -56,26 +56,14 @@ static void print_banner(void)
 
 static void seed_rng(void)
 {
-       int fd;
-       struct {
-               int entropy_count;
-               int buffer_size;
-               unsigned char buffer[256];
-       } entropy = {
-               .entropy_count = sizeof(entropy.buffer) * 8,
-               .buffer_size = sizeof(entropy.buffer),
-               .buffer = "Adding real entropy is not actually important for these tests. Don't try this at home, kids!"
-       };
+       int bits = 256, fd;
 
-       if (mknod("/dev/urandom", S_IFCHR | 0644, makedev(1, 9)))
-               panic("mknod(/dev/urandom)");
-       fd = open("/dev/urandom", O_WRONLY);
+       pretty_message("[+] Fake seeding RNG...");
+       fd = open("/dev/random", O_WRONLY);
        if (fd < 0)
-               panic("open(urandom)");
-       for (int i = 0; i < 256; ++i) {
-               if (ioctl(fd, RNDADDENTROPY, &entropy) < 0)
-                       panic("ioctl(urandom)");
-       }
+               panic("open(random)");
+       if (ioctl(fd, RNDADDTOENTCNT, &bits) < 0)
+               panic("ioctl(RNDADDTOENTCNT)");
        close(fd);
 }
 
@@ -270,10 +258,10 @@ static void check_leaks(void)
 
 int main(int argc, char *argv[])
 {
-       seed_rng();
        ensure_console();
        print_banner();
        mount_filesystems();
+       seed_rng();
        kmod_selftests();
        enable_logging();
        clear_leaks();
index 5a1eda6..11fb417 100644 (file)
@@ -46,7 +46,7 @@ DATADIR       :=      /usr/share
 DOCDIR :=      $(DATADIR)/doc
 MANDIR :=      $(DATADIR)/man
 LICDIR :=      $(DATADIR)/licenses
-SRCTREE        :=      $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
+SRCTREE        :=      $(or $(BUILD_SRC),$(CURDIR))
 
 # If running from the tarball, man pages are stored in the Documentation
 # dir. If running from the kernel source, man pages are stored in
index 1b128e5..c623566 100644 (file)
@@ -38,7 +38,7 @@ $(OUTPUT)ffs-test: $(FFS_TEST_IN)
 
 clean:
        rm -f $(ALL_PROGRAMS)
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 0d7bbe4..1b25cc7 100644 (file)
@@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
 
 CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
-LDFLAGS += -lpthread
+CFLAGS += -pthread
+LDFLAGS += -pthread
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
        ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
index 8f41cd6..834a90b 100644 (file)
@@ -26,8 +26,8 @@ enum dma_data_direction {
 #define dma_map_single(d, p, s, dir) (virt_to_phys(p))
 #define dma_mapping_error(...) (0)
 
-#define dma_unmap_single(...) do { } while (0)
-#define dma_unmap_page(...) do { } while (0)
+#define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
+#define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 
 #define dma_max_mapping_size(...) SIZE_MAX
 
index 7679335..7d98e76 100644 (file)
@@ -441,7 +441,6 @@ static void usage(void)
                "-n\t\tSort by task command name.\n"
                "-a\t\tSort by memory allocate time.\n"
                "-r\t\tSort by memory release time.\n"
-               "-c\t\tCull by comparing stacktrace instead of total block.\n"
                "-f\t\tFilter out the information of blocks whose memory has been released.\n"
                "--pid <PID>\tSelect by pid. This selects the information of blocks whose process ID number equals to <PID>.\n"
                "--tgid <TGID>\tSelect by tgid. This selects the information of blocks whose Thread Group ID number equals to <TGID>.\n"
@@ -466,14 +465,11 @@ int main(int argc, char **argv)
                { 0, 0, 0, 0},
        };
 
-       while ((opt = getopt_long(argc, argv, "acfmnprstP", longopts, NULL)) != -1)
+       while ((opt = getopt_long(argc, argv, "afmnprstP", longopts, NULL)) != -1)
                switch (opt) {
                case 'a':
                        cmp = compare_ts;
                        break;
-               case 'c':
-                       cull = cull | CULL_STACKTRACE;
-                       break;
                case 'f':
                        filter = filter | FILTER_UNRELEASE;
                        break;
index cc0d282..59d9e8b 100644 (file)
@@ -3,7 +3,7 @@
 # kbuild file for usr/ - including initramfs image
 #
 
-compress-y                                     := shipped
+compress-y                                     := copy
 compress-$(CONFIG_INITRAMFS_COMPRESSION_GZIP)  := gzip
 compress-$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) := bzip2
 compress-$(CONFIG_INITRAMFS_COMPRESSION_LZMA)  := lzma
@@ -37,7 +37,7 @@ endif
 # .cpio.*, use it directly as an initramfs, and avoid double compression.
 ifeq ($(words $(subst .cpio.,$(space),$(ramfs-input))),2)
 cpio-data := $(ramfs-input)
-compress-y := shipped
+compress-y := copy
 endif
 
 endif
index 7b283d4..fa9819e 100644 (file)
@@ -10,7 +10,10 @@ UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration
 
 # In theory, we do not care -m32 or -m64 for header compile tests.
 # It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64.
-UAPI_CFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+
+# USERCFLAGS might contain sysroot location for CC.
+UAPI_CFLAGS += $(USERCFLAGS)
 
 override c_flags = $(UAPI_CFLAGS) -Wp,-MMD,$(depfile) -I$(objtree)/usr/include
 
@@ -84,7 +87,7 @@ endif
 # asm-generic/*.h is used by asm/*.h, and should not be included directly
 no-header-test += asm-generic/%
 
-extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
+always-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
 
 # Include the header twice to detect missing include guard.
 quiet_cmd_hdrtest = HDRTEST $<